code (string, lengths 86 to 54.5k) | code_codestyle (int64, 0 to 371) | style_context (string, lengths 87 to 49.2k) | style_context_codestyle (int64, 0 to 349) | label (int64, 0 to 1)
---|---|---|---|---
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """Serialize this instance, converting any nested `GenerationConfig` into a plain dictionary."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
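# Usage sketch (illustrative, not part of the original sample): enabling generate()-based
# evaluation; every value below is an arbitrary example.
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="demo_output",
#       predict_with_generate=True,
#       generation_max_length=128,
#       generation_num_beams=4,
#   )
#   assert args.to_dict()["generation_num_beams"] == 4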
| 92 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Prune: the current path already overshoots, or even taking every remaining
    # number cannot reach the target sum.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index]
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
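# Cross-check sketch (not in the original): brute-force enumeration with itertools
# should find exactly the same qualifying subsets as the backtracking solver.
from itertools import combinations

brute = [list(c) for r in range(len(nums) + 1) for c in combinations(nums, r) if sum(c) == max_sum]
assert sorted(map(sorted, brute)) == sorted(map(sorted, result))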
| 327 | 0 |
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
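    # Example invocation (illustrative; the repo id comes from the help string above,
    # the script and output folder names are assumptions):
    #   python convert_roberta_prelayernorm_checkpoint.py \
    #       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
    #       --pytorch_dump_folder_path ./roberta_prelayernorm_dump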
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 371 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
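# Quick sanity sketch (not part of the original script): the "init" pattern/replacement
# pair rewrites a version assignment; "4.99.0" is a made-up version number.
_demo_re, _demo_repl = REPLACE_PATTERNS["init"]
assert _demo_re.sub(_demo_repl.replace("VERSION", "4.99.0"), '__version__ = "4.98.0.dev0"').startswith(
    '__version__ = "4.99.0"'
)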
def update_version_in_file(fname, version, pattern):
    # Update the version in one file, using the regex registered for `pattern`.
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
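    # Example invocations (illustrative; the file name release.py is an assumption):
    #   python release.py                 -> prepare the next minor release
    #   python release.py --patch         -> prepare a patch release
    #   python release.py --post_release  -> switch back to the next dev version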
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 96 | 0 |
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin the denoising loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,), "This is a local test"
        return ImagePipelineOutput(images=image), "This is a local test"
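# Usage sketch (illustrative; assumes a compatible UNet/scheduler pair is in scope,
# and that the class name reconstructed above is accurate):
#   pipe = CustomLocalPipeline(unet=unet, scheduler=scheduler)
#   output, note = pipe(batch_size=1, num_inference_steps=50)
#   assert note == "This is a local test"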
| 95 |
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    # Sieve of Eratosthenes: returns all prime numbers below max_number.
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    # Counts hybrid integers p^q * q^p (p, q distinct primes) not exceeding
    # base^degree, comparing in log2 space to avoid huge exponentiations.
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
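    # Sanity sketch (not in the original): p^q * q^p <= base^degree exactly when
    # q*log2(p) + p*log2(q) <= degree*log2(base); for p=2, q=3 the hybrid integer
    # is 2^3 * 3^2 = 72, and the identity below holds up to float rounding.
    assert abs((3 * log2(2) + 2 * log2(3)) - log2(72)) < 1e-12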
| 156 | 0 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
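# Behavioural note (illustrative, not part of the original file): with _LazyModule,
# `from transformers.models.cpmant import CpmAntConfig` resolves the attribute lazily,
# so configuration_cpmant is only imported on first access, not at package import time.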
| 364 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset([])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # Build a small random init image and a shifted copy of it as the mask.
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
'''simple docstring'''
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
lowerCAmelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy' )
lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting'
lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained(lowercase_ , safety_checker=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , output_type='np' , )
lowerCAmelCase_ = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
'''simple docstring'''
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
lowerCAmelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting'
lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained(
lowercase_ , torch_dtype=torch.float16 , safety_checker=lowercase_ , )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , output_type='np' , )
lowerCAmelCase_ = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting'
lowerCAmelCase_ = PNDMScheduler.from_pretrained(lowercase_ , subfolder='scheduler' )
lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained(
lowercase_ , safety_checker=lowercase_ , scheduler=lowercase_ , torch_dtype=torch.float16 , )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type='np' , )
lowerCAmelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9
| 14 | 0 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    """Packs a stream of text examples into constant-length token sequences."""

    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
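# Packing sketch (illustrative; the toy documents below are made up): two short
# "content" records are concatenated with BOS separators and cut into fixed windows;
# only full windows are yielded.
#
#   toy_stream = [{"content": "def f():\n    pass"}, {"content": "print('hi')"}]
#   packed = ConstantLengthDataset(tokenizer, toy_stream, seq_length=8, num_of_sequences=1, chars_per_token=1.0)
#   for window in packed:
#       assert window.shape == (8,)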
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 68 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
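# Worked example (not in the original): heights and widths are rounded up to whole
# blocks of scale_factor**2 = 64 pixels, then re-expressed in latent units:
# 768 -> 12 blocks -> 96, while 700 rounds up to 11 blocks -> 88.
assert downscale_height_and_width(768, 768) == (96, 96)
assert downscale_height_and_width(700, 700) == (88, 88)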
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """Image-to-image generation pipeline for Kandinsky 2.2 (decoder stage)."""

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A=None ):
if not isinstance(_A , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_A )}""" )
__A : Union[str, Any] = image.to(device=_A , dtype=_A )
__A : Optional[Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
__A : int = image
else:
if isinstance(_A , _A ) and len(_A ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(_A )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(_A , _A ):
__A : str = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_A )
]
__A : str = torch.cat(_A , dim=0 )
else:
__A : List[str] = self.movq.encode(_A ).latent_dist.sample(_A )
__A : Tuple = self.movq.config.scaling_factor * init_latents
__A : Optional[int] = torch.cat([init_latents] , dim=0 )
__A : Union[str, Any] = init_latents.shape
__A : List[str] = randn_tensor(_A , generator=_A , device=_A , dtype=_A )
# get latents
__A : Optional[Any] = self.scheduler.add_noise(_A , _A , _A )
__A : Optional[int] = init_latents
return latents
def UpperCAmelCase_ ( self , _A=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__A : Optional[int] = torch.device(F"""cuda:{gpu_id}""" )
__A : Union[str, Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_A , _A )
def UpperCAmelCase_ ( self , _A=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
__A : List[Any] = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_A )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__A : int = None
for cpu_offloaded_model in [self.unet, self.movq]:
__A , __A : Optional[int] = cpu_offload_with_hook(_A , _A , prev_module_hook=_A )
# We'll offload the last model manually.
__A : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase_ ( self ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_A , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_A )
def __call__( self , _A , _A , _A , _A = 512 , _A = 512 , _A = 100 , _A = 4.0 , _A = 0.3 , _A = 1 , _A = None , _A = "pil" , _A = True , ):
__A : List[Any] = self._execution_device
__A : Optional[Any] = guidance_scale > 1.0
if isinstance(_A , _A ):
__A : Optional[Any] = torch.cat(_A , dim=0 )
__A : Tuple = image_embeds.shape[0]
if isinstance(_A , _A ):
__A : List[Any] = torch.cat(_A , dim=0 )
if do_classifier_free_guidance:
__A : Union[str, Any] = image_embeds.repeat_interleave(_A , dim=0 )
__A : Optional[int] = negative_image_embeds.repeat_interleave(_A , dim=0 )
__A : List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_A )
if not isinstance(_A , _A ):
__A : List[Any] = [image]
if not all(isinstance(_A , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(_A ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
__A : Dict = torch.cat([prepare_image(_A , _A , _A ) for i in image] , dim=0 )
__A : Any = image.to(dtype=image_embeds.dtype , device=_A )
__A : Tuple = self.movq.encode(_A )['latents']
__A : int = latents.repeat_interleave(_A , dim=0 )
self.scheduler.set_timesteps(_A , device=_A )
__A , __A : int = self.get_timesteps(_A , _A , _A )
__A : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
__A , __A : Any = downscale_height_and_width(_A , _A , self.movq_scale_factor )
__A : Tuple = self.prepare_latents(
_A , _A , _A , _A , image_embeds.dtype , _A , _A )
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the latents if we are doing classifier free guidance
__A : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__A : Dict = {'image_embeds': image_embeds}
__A : List[str] = self.unet(
sample=_A , timestep=_A , encoder_hidden_states=_A , added_cond_kwargs=_A , return_dict=_A , )[0]
if do_classifier_free_guidance:
__A , __A : Dict = noise_pred.split(latents.shape[1] , dim=1 )
__A , __A : Optional[Any] = noise_pred.chunk(2 )
__A , __A : List[str] = variance_pred.chunk(2 )
__A : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__A : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__A , __A : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__A : List[str] = self.scheduler.step(
_A , _A , _A , generator=_A , )[0]
# post-processing
__A : List[Any] = self.movq.decode(_A , force_not_quantize=_A )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
__A : List[str] = image * 0.5 + 0.5
__A : List[str] = image.clamp(0 , 1 )
__A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__A : Any = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
| 280 | 0 |
"""simple docstring"""
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
lowerCAmelCase : Dict = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
lowerCAmelCase : Optional[Any] = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
lowerCAmelCase : Optional[int] = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
lowerCAmelCase : Tuple = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
lowerCAmelCase : List[Any] = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
lowerCAmelCase : Dict = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
lowerCAmelCase : Dict = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
lowerCAmelCase : Dict = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
lowerCAmelCase : Union[str, Any] = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
lowerCAmelCase : Optional[int] = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
lowerCAmelCase : Any = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
lowerCAmelCase : Optional[Any] = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
lowerCAmelCase : List[str] = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
lowerCAmelCase : List[str] = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
lowerCAmelCase : Union[str, Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
        print(f"Started running {mod.modelId}!!!")
        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)
        torch.manual_seed(0)
        random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
| 359 |
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
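# Quick check (not in the original): at the kernel centre px = py = 0, so the
# response reduces to exp(0) * cos(psi); with psi = 0 that is exactly 1.
assert abs(gabor_filter_kernel(9, 8, 0, 10, 0, 0)[4, 4] - 1.0) < 1e-9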
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| 168 | 0 |
from math import sqrt
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
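# Spot check (not in the original): 2, 3 and 5 are prime; 1 and 9 are not.
assert is_prime(2) and is_prime(3) and is_prime(5)
assert not is_prime(1) and not is_prime(9)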
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowercase : str = list(range(2 , n + 1 ) )
lowercase : Tuple = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
for j in range(i + 1 , len(SCREAMING_SNAKE_CASE__ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowercase : Tuple = 0
# filters actual prime numbers.
lowercase : int = [x for x in begin_list if x != 0]
# precondition
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "'ans' must been from type list"
return ans
def get_prime_numbers(n):
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def prime_factorization(number):
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> List[Any]:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase : Tuple = 0
# prime factorization of 'number'
lowercase : Optional[int] = prime_factorization(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = max(SCREAMING_SNAKE_CASE__ )
# precondition
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "'ans' must been from type int"
return ans
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> str:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase : Union[str, Any] = 0
# prime factorization of 'number'
lowercase : Tuple = prime_factorization(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = min(SCREAMING_SNAKE_CASE__ )
# precondition
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "'ans' must been from type int"
return ans
def is_even(number):
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"
    return number % 2 == 0


def is_odd(number):
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"
    return number % 2 != 0
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> int:
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (number > 2) and is_even(SCREAMING_SNAKE_CASE__ )
), "'number' must been an int, even and > 2"
lowercase : Union[str, Any] = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
lowercase : str = get_prime_numbers(SCREAMING_SNAKE_CASE__ )
lowercase : Any = len(SCREAMING_SNAKE_CASE__ )
# run variable for while-loops.
lowercase : Optional[Any] = 0
lowercase : List[Any] = None
# exit variable. for break up the loops
lowercase : Any = True
while i < len_pn and loop:
lowercase : str = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowercase : Union[str, Any] = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (len(SCREAMING_SNAKE_CASE__ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def gcd(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple:
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowercase : Dict = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowercase : Optional[Any] = prime_factorization(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = prime_factorization(SCREAMING_SNAKE_CASE__ )
elif numbera == 1 or numbera == 1:
lowercase : Union[str, Any] = []
lowercase : List[str] = []
lowercase : Dict = max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = 0
lowercase : Optional[Any] = 0
lowercase : List[str] = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowercase : Dict = prime_fac_a.count(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = prime_fac_a.count(SCREAMING_SNAKE_CASE__ )
for _ in range(max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ):
ans *= n
else:
lowercase : List[Any] = prime_fac_a.count(SCREAMING_SNAKE_CASE__ )
for _ in range(SCREAMING_SNAKE_CASE__ ):
ans *= n
done.append(SCREAMING_SNAKE_CASE__ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowercase : Optional[int] = prime_fac_a.count(SCREAMING_SNAKE_CASE__ )
for _ in range(SCREAMING_SNAKE_CASE__ ):
ans *= n
done.append(SCREAMING_SNAKE_CASE__ )
# precondition
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Any:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (n >= 0), "'number' must been a positive int"
lowercase : Dict = 0
lowercase : List[str] = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(SCREAMING_SNAKE_CASE__ ):
ans += 1
# precondition
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and is_prime(
SCREAMING_SNAKE_CASE__ ), "'ans' must been a prime number and from type int"
return ans
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
assert (
is_prime(SCREAMING_SNAKE_CASE__ ) and is_prime(SCREAMING_SNAKE_CASE__ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowercase : List[str] = p_number_a + 1 # jump to the next number
lowercase : List[Any] = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(SCREAMING_SNAKE_CASE__ ):
number += 1
while number < p_number_a:
ans.append(SCREAMING_SNAKE_CASE__ )
number += 1
# fetch the next prime number.
while not is_prime(SCREAMING_SNAKE_CASE__ ):
number += 1
# precondition
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and ans[0] != p_number_a
and ans[len(SCREAMING_SNAKE_CASE__ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def get_divisors(n):
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"
    return ans
def is_perfect_number(number):
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator, denominator):
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n):
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n):
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
| 20 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
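# Usage sketch (illustrative; google/vit-base-patch16-224 is a real public classifier,
# and the image URL is assumed to be reachable):
#   from transformers import pipeline
#   classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
#   preds = classifier(
#       "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", top_k=3
#   )
#   # -> [{"score": ..., "label": ...}, ...]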
| 20 | 1 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 146 |
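# Example invocation of the conversion script above (the file paths are hypothetical
# placeholders for a downloaded LUKE checkpoint):
#
#   python convert_luke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./luke_base/pytorch_model.bin \
#       --metadata_path ./luke_base/metadata.json \
#       --entity_vocab_path ./luke_base/entity_vocab.tsv \
#       --pytorch_dump_folder_path ./converted_luke_base \
#       --model_size base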
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
| 146 | 1 |
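# For the adjacency list above, the articulation points printed are 2, 3 and 5:
# removing any of them disconnects the graph. A minimal extra check on a path graph,
# whose middle vertex must be a cut vertex (assumes compute_ap as defined above):
compute_ap({0: [1], 1: [0, 2], 2: [1]})  # prints: 1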
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    A = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    A = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
A = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
        attention_weights = ly_weight["attention"]
A = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_continuous_encoder(weights, model):
    A = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    A = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
A = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
A = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
A = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_decoder(weights, model):
    A = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    A = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    A = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    A = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
A = nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
A = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
        attention_weights = ly_weight["self_attention"]
A = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
A = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
A = nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
A = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
A = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
A = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
A = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def main(args):
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )

    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(ta_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=F"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
args = parser.parse_args()
main(args) | 74 |
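# Example invocation of the script above (the checkpoint directory is a hypothetical
# placeholder for a downloaded music-spectrogram-diffusion checkpoint):
#
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram_diffusion \
#       --save True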
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowercase (unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = 0
@slow
def _lowerCamelCase ( self ):
'''simple docstring'''
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPTaTokenizer, GPTaTokenizerFast))
            self.assertGreater(len(tokenizer), 0)
def _lowerCamelCase ( self ):
'''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
def _lowerCamelCase ( self ):
'''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)
def _lowerCamelCase ( self ):
'''simple docstring'''
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
def _lowerCamelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(snake_case__ , "vocab.txt" ) )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , tokenizer_type="bert" , use_fast=snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(snake_case__ , "vocab.json" ) )
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(snake_case__ , "merges.txt" ) )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , tokenizer_type="gpt2" , use_fast=snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@require_tokenizers
def _lowerCamelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(snake_case__ , "vocab.txt" ) )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , tokenizer_type="bert" )
self.assertIsInstance(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(snake_case__ , "vocab.json" ) )
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(snake_case__ , "merges.txt" ) )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , tokenizer_type="gpt2" )
self.assertIsInstance(snake_case__ , snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
with pytest.raises(snake_case__ ):
AutoTokenizer.from_pretrained("./" , tokenizer_type="xxx" )
@require_tokenizers
def _lowerCamelCase ( self ):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCamelCase_ = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased" )
self.assertIsInstance(snake_case__ , (BertTokenizer, BertTokenizerFast) )
if isinstance(snake_case__ , snake_case__ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , snake_case__ )
else:
self.assertEqual(tokenizer.do_lower_case , snake_case__ )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def _lowerCamelCase ( self ):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
snake_case__ , "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier" , ):
UpperCamelCase_ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists" )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = TOKENIZER_MAPPING.values()
UpperCamelCase_ = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(snake_case__ )
@require_tokenizers
def _lowerCamelCase ( self ):
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=snake_case__ ) , snake_case__ )
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" ) , snake_case__ )
@require_tokenizers
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = AutoTokenizer.from_pretrained("distilbert-base-uncased" , do_lower_case=snake_case__ )
UpperCamelCase_ = "Hello, world. How are you?"
UpperCamelCase_ = tokenizer.tokenize(snake_case__ )
self.assertEqual("[UNK]" , tokens[0] )
UpperCamelCase_ = AutoTokenizer.from_pretrained("microsoft/mpnet-base" , do_lower_case=snake_case__ )
UpperCamelCase_ = tokenizer.tokenize(snake_case__ )
self.assertEqual("[UNK]" , tokens[0] )
@require_tokenizers
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config" )
self.assertEqual(type(snake_case__ ) , snake_case__ )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 3_0000 )
self.assertEqual(tokenizer.unk_token , "[UNK]" )
self.assertEqual(tokenizer.padding_side , "right" )
self.assertEqual(tokenizer.truncation_side , "right" )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = AutoTokenizer.from_pretrained("ctrl" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(snake_case__ , snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = get_tokenizer_config("bert-base-cased" )
UpperCamelCase_ = config.pop("_commit_hash" , snake_case__ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(snake_case__ , {"do_lower_case": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCamelCase_ = get_tokenizer_config(snake_case__ )
self.assertDictEqual(snake_case__ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
UpperCamelCase_ = get_tokenizer_config(snake_case__ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["tokenizer_class"] , "BertTokenizer" )
def _lowerCamelCase ( self ):
'''simple docstring'''
try:
AutoConfig.register("custom" , snake_case__ )
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
UpperCamelCase_ = CustomTokenizer.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _lowerCamelCase ( self ):
'''simple docstring'''
try:
AutoConfig.register("custom" , snake_case__ )
# Can register in two steps
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(snake_case__ , fast_tokenizer_class=snake_case__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
snake_case__ , slow_tokenizer_class=snake_case__ , fast_tokenizer_class=snake_case__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoTokenizer.register(snake_case__ , fast_tokenizer_class=snake_case__ )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_ = BertTokenizerFast.from_pretrained(snake_case__ )
bert_tokenizer.save_pretrained(snake_case__ )
UpperCamelCase_ = CustomTokenizerFast.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , use_fast=snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaises(snake_case__ ):
UpperCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case__ ):
UpperCamelCase_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ )
UpperCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , trust_remote_code=snake_case__ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
UpperCamelCase_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
@require_tokenizers
def _lowerCamelCase ( self ):
'''simple docstring'''
class _lowercase (a_ ):
'''simple docstring'''
lowercase__ = False
class _lowercase (a_ ):
'''simple docstring'''
lowercase__ = NewTokenizer
lowercase__ = False
try:
AutoConfig.register("custom" , snake_case__ )
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
AutoTokenizer.register(snake_case__ , fast_tokenizer_class=snake_case__ )
# If remote code is not set, the default is to use local
UpperCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
UpperCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , use_fast=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCamelCase_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
UpperCamelCase_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCamelCase_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertTrue(tokenizer.special_attribute_present )
UpperCamelCase_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=snake_case__ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
UpperCamelCase_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def _lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
snake_case__ , "bert-base is not a local folder and is not a valid model identifier" ):
UpperCamelCase_ = AutoTokenizer.from_pretrained("bert-base" )
def _lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
snake_case__ , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , revision="aaaaaa" )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
UpperCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 128 | 0 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 364 |
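# Usage sketch for the helper exercised by the tests above. The `student, *_`
# unpacking mirrors the tests; the exact return layout beyond `student` is an
# assumption here.
import tempfile

from make_student import create_student_by_copying_alternating_layers

student, *_ = create_student_by_copying_alternating_layers(
    "sshleifer/bart-tiny-random", tempfile.mkdtemp(), e=1, d=1
)
print(student.config.encoder_layers, student.config.decoder_layers)  # 1 1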
def calc_profit(profit: list, weight: list, max_weight: int) -> int:
    """Greedy fractional-knapsack: maximize profit within the weight limit."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight) | 116 | 0 |
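# Worked example for calc_profit with profits [10, 20, 30], weights [2, 4, 6] and a
# 5 kg limit: every profit/weight ratio is 5.0, so item 0 is taken whole (2 kg, +10)
# and 3 of item 1's 4 kg are taken fractionally, giving 10 + 3 / 4 * 20 = 25.0.
assert calc_profit([10, 20, 30], [2, 4, 6], 5) == 25.0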
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 199 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True, )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 199 | 1 |
"""simple docstring"""
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Fast check: at most one character may have an odd count."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Explicit frequency-dict version of the same check."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0

    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """Time both implementations on the given string."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 359 |
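# Quick standalone checks for the two implementations above (assumes both functions
# are in scope): "Momo" rearranges to "momo"-style palindromes, while "book" has two
# characters with odd counts.
assert can_string_be_rearranged_as_palindrome_counter("Momo") is True
assert can_string_be_rearranged_as_palindrome("Momo") is True
assert can_string_be_rearranged_as_palindrome_counter("book") is False
assert can_string_be_rearranged_as_palindrome("book") is False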
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
| 24 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 95 |
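# The _LazyModule wiring above defers the heavy torch import until an attribute is
# actually accessed. A minimal sketch of the same idea (illustrative only, not the
# real transformers implementation):
import importlib
import types


class LazySubmodules(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {v: k for k, vs in import_structure.items() for v in vs}

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(attr)
        # import the owning submodule on first access, then fetch the attribute
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(module, attr)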
def is_arithmetic_series(series: list) -> bool:
    """Return True if the input list is an arithmetic series."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the input list."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 95 | 1 |
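# Example: [2, 4, 6] has a constant difference of 2, so it is arithmetic, and its
# mean is (2 + 4 + 6) / 3 = 4.0 (assumes the two functions above are in scope).
assert is_arithmetic_series([2, 4, 6]) is True
assert is_arithmetic_series([2, 4, 7]) is False
assert arithmetic_mean([2, 4, 6]) == 4.0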
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
| 301 |
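# Example: min-max scaling maps the extremes to 0 and 1, while standardization uses
# the sample standard deviation (here 2.0) to produce z-scores.
print(normalization([2, 4, 6]))    # [0.0, 0.5, 1.0]
print(standardization([2, 4, 6]))  # [-1.0, 0.0, 1.0]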
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor, tokenizer):
        """simple docstring"""
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        """simple docstring"""
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
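# Usage sketch (hedged; the checkpoint id below is assumed for illustration):
# from transformers import BlipProcessor
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=image, text="a photography of", return_tensors="pt")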
| 301 | 1 |
from manim import *
class A ( _UpperCAmelCase ):
"""simple docstring"""
def snake_case__ ( self : Any )-> Optional[int]:
'''simple docstring'''
A__ = Rectangle(height=0.5,width=0.5 )
A__ = Rectangle(height=0.25,width=0.25 )
A__ = Rectangle(height=0.46,width=0.46 ).set_stroke(width=0 )
A__ = [mem.copy() for i in range(6 )]
A__ = [mem.copy() for i in range(6 )]
A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 )
A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 )
A__ = VGroup(lowercase_,lowercase_ ).arrange(lowercase_,buff=0 )
A__ = Text('CPU',font_size=2_4 )
A__ = Group(lowercase_,lowercase_ ).arrange(lowercase_,buff=0.5,aligned_edge=lowercase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase_ )
A__ = [mem.copy() for i in range(4 )]
A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 )
A__ = Text('GPU',font_size=2_4 )
A__ = Group(lowercase_,lowercase_ ).arrange(lowercase_,buff=0.5,aligned_edge=lowercase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase_ )
A__ = [mem.copy() for i in range(6 )]
A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 )
A__ = Text('Model',font_size=2_4 )
A__ = Group(lowercase_,lowercase_ ).arrange(lowercase_,buff=0.5,aligned_edge=lowercase_ )
model.move_to([3, -1.0, 0] )
self.add(lowercase_ )
A__ = []
A__ = []
A__ = []
for i, rect in enumerate(lowercase_ ):
rect.set_stroke(lowercase_ )
A__ = Rectangle(height=0.46 / 4,width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_,opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ),buff=0.02,direction=lowercase_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0],direction=lowercase_,buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1],direction=lowercase_,buff=0.0 )
self.add(lowercase_ )
model_cpu_arr.append(lowercase_ )
self.add(*lowercase_,*lowercase_,*lowercase_ )
A__ = [mem.copy() for i in range(6 )]
A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 )
A__ = Text('Loaded Checkpoint',font_size=2_4 )
A__ = Group(lowercase_,lowercase_ ).arrange(lowercase_,buff=0.5,aligned_edge=lowercase_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(lowercase_ )
        ckpt_arr = []
        ckpt_cpu_arr = []
for i, rect in enumerate(lowercase_ ):
A__ = fill.copy().set_fill(lowercase_,opacity=0.7 )
target.move_to(lowercase_ )
ckpt_arr.append(lowercase_ )
A__ = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(lowercase_ )
self.add(*lowercase_,*lowercase_ )
A__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A__ = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model',font_size=1_8,)
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase_,lowercase_ )
A__ = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint',font_size=1_8,)
blue_text.next_to(lowercase_,DOWN * 2.4,aligned_edge=key_text.get_left() )
self.add(lowercase_ )
A__ = MarkupText(
F'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.',font_size=2_4,)
step_a.move_to([2, 2, 0] )
A__ = [meta_mem.copy() for i in range(6 )]
A__ = [meta_mem.copy() for i in range(6 )]
A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 )
A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 )
A__ = VGroup(lowercase_,lowercase_ ).arrange(lowercase_,buff=0 )
A__ = Text('Disk',font_size=2_4 )
A__ = Group(lowercase_,lowercase_ ).arrange(lowercase_,buff=0.5,aligned_edge=lowercase_ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(lowercase_,run_time=3 ),Write(lowercase_,run_time=1 ),Create(lowercase_,run_time=1 ) )
        animations = []
for i, rect in enumerate(lowercase_ ):
A__ = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(lowercase_,run_time=1.5 ) )
        self.play(*animations)
self.play(FadeOut(lowercase_ ) )
A__ = MarkupText(F'Then, the checkpoint is removed from memory\nthrough garbage collection.',font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase_,run_time=3 ) )
self.play(
FadeOut(lowercase_,lowercase_,*lowercase_,*lowercase_ ),)
self.wait()
| 7 | """simple docstring"""
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
'''simple docstring'''
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
'''simple docstring'''
if num < 2:
raise ValueError("The input value cannot be less than 2" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is actually what Richard Brent's optimized variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"""{args.num} is probably prime""")
else:
        quotient = args.num // divisor
print(F"""{args.num} = {divisor} * {quotient}""")
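    # Example (assumed invocation): ``python pollard_rho.py 91`` typically prints
    # ``91 = 7 * 13`` or ``91 = 13 * 7``, since 91 is a small semiprime.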
| 366 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "levit"
    def __init__(self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )
    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-4
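# Usage sketch (hedged): a default LevitConfig carries the base defaults shown above.
# config = LevitConfig()
# print(config.hidden_sizes)  # [128, 256, 384]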
| 174 | 0 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel
if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 104 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", sep_token="<::::>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)
    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
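# Usage sketch (hedged): load the published checkpoint named in the map above.
# tokenizer = BertGenerationTokenizer.from_pretrained(
#     "google/bert_for_seq_generation_L-24_bbc_encoder"
# )
# print(tokenizer.tokenize("Hello world"))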
| 104 | 1 |
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)
    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded
    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})
                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))
    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=state_dict, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})
        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
| 363 |
"""simple docstring"""
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """simple docstring"""
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 289 | 0 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")
    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )
    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    """simple docstring"""
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()
    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()
    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))
    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)
    info["`Accelerate` configs"] = accelerate_config
    return info
def main():
    """simple docstring"""
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
| 184 |
import sys
from collections import defaultdict
class Heap:
    '''simple docstring'''
    def __init__(self):
        self.node_position = []
    def get_position(self, vertex):
        return self.node_position[vertex]
    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos
    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp1 = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp1
            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start]))
            self.set_position(positions[start], smallest_child)
            self.top_to_bottom(heap, smallest_child, size, positions)
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)
    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)
    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
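    # Non-interactive example (assumed graph): a triangle with weights 1, 2, 3
    # keeps the two lightest edges in the minimum spanning tree:
    # prisms_algorithm({0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[0, 3], [1, 2]]})
    # -> [(0, 1), (1, 2)]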
| 195 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
        return BlipTextConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id)
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_training(self):
        pass
    def test_training_gradient_checkpointing(self):
        pass
    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass
    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 2 | """simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "unispeech-sat"
    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, num_clusters=504, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
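# Usage sketch (hedged): defaults mirror the base architecture.
# config = UniSpeechSatConfig()
# print(config.inputs_to_logits_ratio)  # 320, the product of the conv strides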
| 2 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    '''simple docstring'''

    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coin(root: TreeNode | None) -> int:
    if root is None:
        return 0
    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1
    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data
    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")
    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        coins_to_move = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(coins_to_move, distrib_excess)
    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
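    # Small demo (assumed tree): a root with 0 coins whose children hold 3 and 0
    # coins needs three moves to leave exactly one coin on every node.
    demo_root = TreeNode(0, TreeNode(3), TreeNode(0))
    print(distribute_coin(demo_root))  # 3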
| 151 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        '''simple docstring'''
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        '''simple docstring'''
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    @slow
    def test_tokenization_base_easy_symbols(self):
        '''simple docstring'''
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        '''simple docstring'''
        symbols = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        '''simple docstring'''
        import torch
        from transformers import BigBirdConfig, BigBirdModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False)
        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_special_tokens(self):
        '''simple docstring'''
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = {'''input_ids''': [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 102 | 0 |
"""simple docstring"""
def validate_initial_digits(credit_card_number: str) -> bool:
    '''simple docstring'''
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))
def luhn_validation(credit_card_number: str) -> bool:
    '''simple docstring'''
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
def validate_credit_card_number(credit_card_number: str) -> bool:
    '''simple docstring'''
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
| 1 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
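
# Usage sketch (added, illustrative): `attribute_map` makes the generic config
# names resolve to DETR-specific fields, e.g. `DetrConfig().hidden_size` returns
# `d_model` (256) and `DetrConfig().num_attention_heads` returns
# `encoder_attention_heads` (8), without storing duplicate attributes.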
| 1 | 1 |
"""Odd-even transposition sort (single-threaded): a bubble-sort variant that alternates odd/even neighbour passes."""


def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
| 200 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""


if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
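
# Illustrative cross-check (added; not part of the metric API): for plain strings,
# CER is the character-level Levenshtein distance divided by the reference length.
# The helper name below is made up for this sketch, and it assumes a non-empty reference.
def _character_error_rate(prediction: str, reference: str) -> float:
    m, n = len(reference), len(prediction)
    # dp[j] holds the edit distance between the current reference prefix and prediction[:j]
    dp = list(range(n + 1))
    for i in range(1, m + 1):
        prev, dp[0] = dp[0], i
        for j in range(1, n + 1):
            cur = dp[j]
            cost = 0 if reference[i - 1] == prediction[j - 1] else 1
            dp[j] = min(dp[j] + 1, dp[j - 1] + 1, prev + cost)  # deletion, insertion, substitution
            prev = cur
    return dp[n] / m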
| 99 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )
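
    # Note (added): `top_k` supersedes the legacy `return_all_scores` flag used
    # above; `top_k=None` / `return_all_scores=True` return every label's score
    # per input, while the default keeps only the top label.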
    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

 | 210 |
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line: str) -> str:
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into its indented blocks, starting at `start_prompt` and ending at `end_prompt` (if provided)."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wraps a `key` (that maps an object to string) to lower case and remove underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort. `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
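
# Example (added): sort_objects(["b_fn", "CONST", "AClass", "a_fn"]) returns
# ["CONST", "AClass", "a_fn", "b_fn"] - constants first, then classes, then
# functions, each group sorted case- and underscore-insensitively.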
def sort_objects_in_import(import_statement: str) -> str:
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`; `check_only` determines if we only check or overwrite."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Runs `sort_imports` on every `__init__.py` under `PATH_TO_TRANSFORMERS`."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)

 | 210 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """Double Linked List Node built specifically for LRU Cache."""

    def __init__(self, key, val):
        self.key = key
        self.val = val
        self.next = None
        self.prev = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Double Linked List built specifically for LRU Cache."""

    def __init__(self) -> None:
        self.head = DoubleLinkedListNode(None, None)
        self.rear = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node) -> None:
        """Adds the given node to the end of the list (before rear)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node):
        """Removes and returns the given node; returns None if the node is not linked."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU Cache that stores up to `capacity` key/value pairs."""

    # class variable mapping decorated functions to their cache instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key) -> bool:
        return key in self.cache

    def get(self, key):
        """Returns the value for `key` and refreshes its recency; None if absent."""
        if key in self.cache:
            self.hits += 1
            value_node = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key, value) -> None:
        """Sets the value for `key`, evicting the least recently used entry if full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator version of LRU Cache: memoizes a function by its first positional argument."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
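
# Usage sketch (added, illustrative): `LRUCache.decorator` memoizes on the first
# positional argument, so the recursive calls below would be served from the cache.
#
#   @LRUCache.decorator(100)
#   def fib(num: int) -> int:
#       return num if num < 2 else fib(num - 1) + fib(num - 2)
#
#   fib(30)
#   print(fib.cache_info())  # CacheInfo(hits=28, misses=31, capacity=100, current size=31)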
| 296 |
def max_product_subarray(numbers: list) -> int:
    """Returns the maximum product of a contiguous subarray of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
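
# Worked example (added): for [2, 3, -2, 4] the answer is 6, from the subarray
# [2, 3]; tracking the running minimum is what lets a later negative number turn
# a large negative prefix product back into the maximum.
if __name__ == "__main__":
    print(max_product_subarray([2, 3, -2, 4]))  # 6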
| 278 | 0 |
"""Tests for the zero-shot image classification pipeline."""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __UpperCAmelCase :
'''simple docstring'''
@staticmethod
def __A ( *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
pass
@is_pipeline_test
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def __A ( self ) -> Optional[Any]:
A_ = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
A_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
A_ = image_classifier(_SCREAMING_SNAKE_CASE , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
] , )
A_ = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
[
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
],
] , )
@require_tf
def __A ( self ) -> Optional[Any]:
A_ = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
A_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
A_ = image_classifier(_SCREAMING_SNAKE_CASE , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
A_ = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
[
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
{'''score''': 0.333, '''label''': ANY(_SCREAMING_SNAKE_CASE )},
],
] , )
@slow
@require_torch
def __A ( self ) -> Any:
A_ = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
A_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
A_ = image_classifier(_SCREAMING_SNAKE_CASE , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
A_ = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def __A ( self ) -> Tuple:
A_ = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
A_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
A_ = image_classifier(_SCREAMING_SNAKE_CASE , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
A_ = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
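
# Note (added): the pipeline softmaxes image-text similarity over the candidate
# labels, so each expected triple of scores above sums to 1.0.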
 | 363 |
"""Generate and write the model cards for the four facebook/wmt19 FSMT checkpoints."""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
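
# Note (added): the doubled braces in the BibTeX section of the card escape
# literal `{`/`}` inside the f-string; the loop above regenerates one card per
# released translation direction.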
| 18 | 0 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
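
# Note (added): the `l0` branch applies the hard-concrete-style mask used in L0
# regularization: sigmoid scores are stretched from (0, 1) to (l, r) = (-0.1, 1.1)
# and clamped back to [0, 1], which pushes many mask entries to exactly 0 or 1.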
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
    args = parser.parse_args()
main(args)
| 207 |
"""Simplified DES (S-DES): a teaching-scale version of the DES block cipher."""


def apply_table(inp, table):
    """Apply the given permutation/selection table to the input bit string."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift of a bit string by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit value in S-box `s`: outer bits select the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round: expand, XOR with the round key, substitute, permute, XOR into the left half."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


key = input("Enter 10 bit key: ")
message = input("Enter 8 bit message: ")

p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
p4_table = [2, 4, 3, 1]
IP = [2, 6, 3, 1, 4, 8, 5, 7]
IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
expansion = [4, 1, 2, 3, 2, 3, 4, 1]
s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

# key generation
temp = apply_table(key, p10_table)
left = temp[:5]
right = temp[5:]
left = left_shift(left)
right = left_shift(right)
key1 = apply_table(left + right, p8_table)
left = left_shift(left)
right = left_shift(right)
left = left_shift(left)
right = left_shift(right)
key2 = apply_table(left + right, p8_table)

# encryption
temp = apply_table(message, IP)
temp = function(expansion, s0, s1, key1, temp)
temp = temp[4:] + temp[:4]
temp = function(expansion, s0, s1, key2, temp)
CT = apply_table(temp, IP_inv)
print("Cipher text is:", CT)

# decryption
temp = apply_table(CT, IP)
temp = function(expansion, s0, s1, key2, temp)
temp = temp[4:] + temp[:4]
temp = function(expansion, s0, s1, key1, temp)
PT = apply_table(temp, IP_inv)
print("Plain text after decrypting is:", PT)
| 75 | 0 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
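
# Usage sketch (added, illustrative): the manager infers the archive format from
# the file's magic number, extracts into the datasets cache, and returns the path.
#   manager = ExtractManager(cache_dir="/tmp/datasets_cache")
#   extracted_path = manager.extract("archive.tar.gz")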
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        # Filter out tar members whose paths or link targets would escape
        # `output_path` (mitigation for path-traversal extraction attacks).
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)


class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : str = [b'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def lowercase_ (__UpperCAmelCase : Union[Path, str] , __UpperCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
with lzma.open(__UpperCAmelCase ) as compressed_file:
with open(__UpperCAmelCase , "wb" ) as extracted_file:
shutil.copyfileobj(__UpperCAmelCase , __UpperCAmelCase )
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : Union[str, Any] = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def lowercase_ (__UpperCAmelCase : Union[Path, str] , __UpperCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
UpperCAmelCase__ = rarfile.RarFile(__UpperCAmelCase )
rf.extractall(__UpperCAmelCase )
rf.close()
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : str = [b'\x28\xb5\x2F\xFD']
@staticmethod
def lowercase_ (__UpperCAmelCase : Union[Path, str] , __UpperCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
UpperCAmelCase__ = zstd.ZstdDecompressor()
with open(__UpperCAmelCase , "rb" ) as ifh, open(__UpperCAmelCase , "wb" ) as ofh:
dctx.copy_stream(__UpperCAmelCase , __UpperCAmelCase )
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : Tuple = [b'\x42\x5A\x68']
@staticmethod
def lowercase_ (__UpperCAmelCase : Union[Path, str] , __UpperCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
with bza.open(__UpperCAmelCase , "rb" ) as compressed_file:
with open(__UpperCAmelCase , "wb" ) as extracted_file:
shutil.copyfileobj(__UpperCAmelCase , __UpperCAmelCase )
class SevenZipExtractor ( MagicNumberBaseExtractor ):
    magic_numbers = [b'\x37\x7A\xBC\xAF\x27\x1C']

    @staticmethod
    def extract (input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        """simple docstring"""
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr" )
        import py7zr

        os.makedirs(output_path , exist_ok=True )
        with py7zr.SevenZipFile(input_path , "r" ) as archive:
            archive.extractall(output_path )
class Lz4Extractor ( MagicNumberBaseExtractor ):
    magic_numbers = [b'\x04\x22\x4D\x18']

    @staticmethod
    def extract (input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        """simple docstring"""
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4" )
        import lz4.frame

        with lz4.frame.open(input_path , "rb" ) as compressed_file:
            with open(output_path , "wb" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class Extractor:
    # Put zip last: other archives (e.g. tar or gzip) can be wrongly detected as zip.
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }
    @classmethod
    def _get_magic_number_max_length (cls ):
        """simple docstring"""
        return max(
            len(extractor_magic_number )
            for extractor in cls.extractors.values()
            if issubclass(extractor , MagicNumberBaseExtractor )
            for extractor_magic_number in extractor.magic_numbers )
    @staticmethod
    def _read_magic_number (path : Union[Path, str] , magic_number_length : int ):
        """simple docstring"""
        try:
            return MagicNumberBaseExtractor.read_magic_number(path , magic_number_length=magic_number_length )
        except OSError:
            return b""
    @classmethod
    def is_extractable (cls , path : Union[Path, str] , return_extractor : bool = False ) -> bool:
        """simple docstring"""
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead." , category=FutureWarning , )
        extractor_format = cls.infer_extractor_format(path )
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)
    @classmethod
    def infer_extractor_format (cls , path : Union[Path, str] ) -> str:  # <Added version="2.4.0"/>
        """simple docstring"""
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path , magic_number_max_length )
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path , magic_number=magic_number ):
                return extractor_format
    @classmethod
    def extract (cls , input_path : Union[Path, str] , output_path : Union[Path, str] , extractor_format : Optional[str] = None , extractor : Optional[BaseExtractor] = "deprecated" , ) -> None:
        """simple docstring"""
        os.makedirs(os.path.dirname(output_path ) , exist_ok=True )
        # Prevent parallel extractions
        lock_path = str(Path(output_path ).with_suffix(".lock" ) )
        with FileLock(lock_path ):
            shutil.rmtree(output_path , ignore_errors=True )
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format , str ):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead." , category=FutureWarning , )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path , output_path )
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0." , category=FutureWarning , )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path ):
                        return extractor.extract(input_path , output_path )
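# Minimal usage sketch for the registry above (the paths are made-up placeholders,
# not from the original module):
#
#     archive_format = Extractor.infer_extractor_format("/tmp/data.tar.gz")
#     if archive_format is not None:
#         Extractor.extract("/tmp/data.tar.gz", "/tmp/data", extractor_format=archive_format)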
| 364 | from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader ( yaml.SafeLoader ):
    def _check_no_duplicates_on_constructed_node (self , node ):
        """simple docstring"""
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key ) if isinstance(key , list ) else key for key in keys]
        counter = Counter(keys )
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"""Got duplicate yaml keys: {duplicate_keys}""" )

    def construct_mapping (self , node , deep=False ):
        """simple docstring"""
        mapping = super().construct_mapping(node , deep=deep )
        self._check_no_duplicates_on_constructed_node(node )
        return mapping
def _split_yaml_from_readme ( readme_content: str ) -> Tuple[Optional[str], str]:
    '''simple docstring'''
    full_content = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---" ) + 1
        yamlblock = "\n".join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
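# Hand-checked illustration of the split above (not from the original file):
# _split_yaml_from_readme("---\nlicense: mit\n---\n# Title") == ("license: mit", "# Title")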
class DatasetMetadata ( dict ):
    # class attributes
    _FIELDS_WITH_DASHES = {'train_eval_index'}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme (cls , path : Path ) -> "DatasetMetadata":
        """simple docstring"""
        with open(path , encoding="utf-8" ) as readme_file:
            yaml_string , _ = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string )
        else:
            return cls()

    def to_readme (self , path : Path ) -> None:
        """simple docstring"""
        if path.exists():
            with open(path , encoding="utf-8" ) as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content )
        with open(path , "w" , encoding="utf-8" ) as readme_file:
            readme_file.write(updated_readme_content )

    def _to_readme (self , readme_content : Optional[str] = None ) -> str:
        """simple docstring"""
        if readme_content is not None:
            _ , content = _split_yaml_from_readme(readme_content )
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string (cls , string : str ) -> "DatasetMetadata":
        """simple docstring"""
        metadata_dict = yaml.load(string , Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-" , "_" ) if key.replace("-" , "_" ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict )

    def to_yaml_string (self ) -> str:
        """simple docstring"""
        return yaml.safe_dump(
            {
                (key.replace("_" , "-" ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            } , sort_keys=False , allow_unicode=True , encoding="utf-8" , ).decode("utf-8" )
known_task_templates = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
    ap.add_argument('readme_filepath')
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 143 | 0 |
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=5_1_2,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
            ' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool( string ):
        '''simple docstring'''
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}" )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
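# Example invocation (file names are made-up placeholders, not shipped with this script):
#
#     python convert_original_controlnet_to_diffusers.py \
#         --checkpoint_path ./control_sd15_canny.pth \
#         --original_config_file ./cldm_v15.yaml \
#         --dump_path ./controlnet-canny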
| 349 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    '''simple docstring'''

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) ->None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin , unittest.TestCase):
    '''simple docstring'''

    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp( self ):
        """simple docstring"""
        self.model_tester = FlaxRoFormerModelTester(self )

    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_inference_masked_lm( self ):
        """simple docstring"""
        model = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        vocab_size = 50_000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = jnp.array(
            [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 347 | 0 |
"""simple docstring"""
from __future__ import annotations
class XORCipher:
    """simple docstring"""

    def __init__( self , key : int = 0 ):
        '''simple docstring'''
        # private field
        self.__key = key

    def encrypt( self , content : str , key : int ):
        '''simple docstring'''
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]

    def decrypt( self , content : list[str] , key : int ):
        '''simple docstring'''
        assert isinstance(key , int ) and isinstance(content , list )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]

    def encrypt_string( self , content : str , key : int = 0 ):
        '''simple docstring'''
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans

    def decrypt_string( self , content : str , key : int = 0 ):
        '''simple docstring'''
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans

    def encrypt_file( self , file : str , key : int = 0 ):
        '''simple docstring'''
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line , key ) )
        except OSError:
            return False
        return True

    def decrypt_file( self , file : str , key : int ):
        '''simple docstring'''
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line , key ) )
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 320 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
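# Note on the lazy-module pattern above (illustrative): at import time only the names in
# `_import_structure` are registered; the heavy torch/tf/flax submodules are imported on
# first attribute access, e.g.
#
#     from transformers import ElectraModel  # the torch-backed module loads here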
| 320 | 1 |
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
HIDEN_SIZE_MAPPING = {
'169M': 768,
'430M': 1024,
'1B5': 2048,
'3B': 2560,
'7B': 4096,
'14B': 5120,
}
def convert_state_dict( state_dict ):
    '''simple docstring'''
    state_dict_keys = list(state_dict.keys() )
    for name in state_dict_keys:
        weight = state_dict.pop(name )
        # emb -> embedding
        if name.startswith("""emb.""" ):
            name = name.replace("""emb.""" , """embeddings.""" )
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("""blocks.0.ln0""" ):
            name = name.replace("""blocks.0.ln0""" , """blocks.0.pre_ln""" )
        # att -> attention
        name = re.sub(R"""blocks\.(\d+)\.att""" , R"""blocks.\1.attention""" , name )
        # ffn -> feed_forward
        name = re.sub(R"""blocks\.(\d+)\.ffn""" , R"""blocks.\1.feed_forward""" , name )
        # time_mix_k -> time_mix_key
        if name.endswith(""".time_mix_k""" ):
            name = name.replace(""".time_mix_k""" , """.time_mix_key""" )
        # time_mix_v -> time_mix_value
        if name.endswith(""".time_mix_v""" ):
            name = name.replace(""".time_mix_v""" , """.time_mix_value""" )
        # time_mix_r -> time_mix_receptance
        if name.endswith(""".time_mix_r""" ):
            name = name.replace(""".time_mix_r""" , """.time_mix_receptance""" )
        if name != "head.weight":
            name = """rwkv.""" + name
        state_dict[name] = weight
    return state_dict
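# Hand-traced example of the renaming above on a made-up checkpoint key (illustrative):
# "blocks.0.att.time_mix_k" -> "rwkv.blocks.0.attention.time_mix_key"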
def convert_rmkv_checkpoint_to_hf_format( repo_id , checkpoint_file , output_dir , size=None , tokenizer_file=None , push_to_hub=False , model_name=None ):
    '''simple docstring'''
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" )
        vocab_size = 5_0_2_7_7
        tokenizer = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" )
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file )
        vocab_size = len(tokenizer )
    tokenizer.save_pretrained(output_dir )

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" )
    if size not in possible_sizes:
        raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
    config = RwkvConfig(
        vocab_size=vocab_size , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
    config.save_pretrained(output_dir )

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id , checkpoint_file )
    state_dict = torch.load(model_file , map_location="""cpu""" )
    state_dict = convert_state_dict(state_dict )

    # 4. Split in shards and save
    shards , index = shard_checkpoint(state_dict )
    for shard_file, shard in shards.items():
        torch.save(shard , os.path.join(output_dir , shard_file ) )

    if index is not None:
        save_index_file = os.path.join(output_dir , WEIGHTS_INDEX_NAME )
        # Save the index as well
        with open(save_index_file , """w""" , encoding="""utf-8""" ) as f:
            content = json.dumps(index , indent=2 , sort_keys=True ) + """\n"""
            f.write(content )

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        """Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model.""" )
    shard_files = list(shards.keys() )

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir , shard_file ) )
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(output_dir , shard_file ) )

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" )
        model = AutoModelForCausalLM.from_pretrained(output_dir )
        model.push_to_hub(model_name , max_shard_size="""2GB""" )
        tokenizer.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 294 |
"""simple docstring"""
def lowerCAmelCase__ ( equation1 , equation2 ):
    '''simple docstring'''
    # Check if the input is valid
    if not len(equation1 ) == len(equation2 ) == 3:
        raise ValueError("""Please enter a valid equation.""" )
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("""Both a & b of two equations can't be zero.""" )

    # Extract the coefficients
    a1 , b1 , c1 = equation1
    a2 , b2 , c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("""Infinite solutions. (Consistent system)""" )
        else:
            raise ValueError("""No solution. (Inconsistent system)""" )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
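# Hand-checked examples for the solver above; each equation is [a, b, c] for ax + by = c
# (added for illustration, not part of the original module):
if __name__ == "__main__":
    # 2x + 3y = 0 and 5x + y = 0 has only the trivial solution.
    assert lowerCAmelCase__([2, 3, 0], [5, 1, 0]) == (0.0, 0.0)
    # 11x + 2y = 30 and x = 4 gives x = 4 and, from 44 + 2y = 30, y = -7.
    assert lowerCAmelCase__([11, 2, 30], [1, 0, 4]) == (4.0, -7.0)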
| 294 | 1 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config( model_name , num_frames ):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find('''patch''' )
    patch_size = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
    vision_config = XCLIPVisionConfig(patch_size=patch_size , num_frames=num_frames )

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3_072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1_024
        vision_config.intermediate_size = 4_096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3_072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config , vision_config )

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key( name ):
    if name == "token_embedding.weight":
        name = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' )
    if name == "positional_embedding":
        name = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
    if "ln_1" in name:
        name = name.replace('''ln_1''' , '''layer_norm1''' )
    if "ln_2" in name:
        name = name.replace('''ln_2''' , '''layer_norm2''' )
    if "c_fc" in name:
        name = name.replace('''c_fc''' , '''fc1''' )
    if "c_proj" in name:
        name = name.replace('''c_proj''' , '''fc2''' )
    if name.startswith('''transformer.resblocks''' ):
        name = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' )
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' )
    if "ln_final" in name:
        name = name.replace('''ln_final''' , '''text_model.final_layer_norm''' )
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' )
    if name == "visual.positional_embedding":
        name = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' )
    if name.startswith('''visual.transformer.resblocks''' ):
        name = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' )
    if "visual.conv1" in name:
        name = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' )
    if "visual.ln_pre" in name:
        name = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' )
    if "visual.ln_post" in name:
        name = name.replace('''visual.ln_post''' , '''vision_model.post_layernorm''' )
    if "visual.proj" in name:
        name = name.replace('''visual.proj''' , '''visual_projection.weight''' )
    if "text_projection" in name:
        name = name.replace('''text_projection''' , '''text_projection.weight''' )
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' )
    if "prompts_visual_ln" in name:
        name = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' )
    # mit
    if name == "mit.positional_embedding":
        name = name.replace('''positional''' , '''position''' )
    if name.startswith('''mit.resblocks''' ):
        name = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' )
    # prompts generator
    if name.startswith('''prompts_generator.norm''' ):
        name = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' )
    return name
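# Spot checks of the renaming above (hand-traced through the rules, illustrative):
# rename_key("visual.conv1.weight") -> "vision_model.embeddings.patch_embedding.weight"
# rename_key("ln_final.weight")     -> "text_model.final_layer_norm.weight"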
def convert_state_dict( orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )

        if "attn.in_proj" in key:
            key_split = key.split('''.''' )
            if key.startswith('''visual''' ):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith('''mit''' ):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key )
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video( num_frames ):
    if num_frames == 8:
        filename = '''eating_spaghetti_8_frames.npy'''
    elif num_frames == 16:
        filename = '''eating_spaghetti.npy'''
    elif num_frames == 32:
        filename = '''eating_spaghetti_32_frames.npy'''
    file = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' , filename=filename , repo_type='''dataset''' , )
    video = np.load(file )
    return list(video )
def convert_xclip_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]

    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name , num_frames )
    model = XCLIPModel(config )
    model.eval()

    if "drive" in checkpoint_url:
        output = '''pytorch_model.bin'''
        gdown.cached_download(checkpoint_url , output , quiet=False )
        state_dict = torch.load(output , map_location='''cpu''' )['''model''']
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url )['''model''']

    state_dict = convert_state_dict(state_dict , config )

    model = XCLIPModel(config )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224
    image_processor = VideoMAEImageProcessor(size=size )
    slow_tokenizer = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' )
    fast_tokenizer = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' )
    processor = XCLIPProcessor(image_processor=image_processor , tokenizer=fast_tokenizer )

    video = prepare_video(num_frames )
    inputs = processor(
        text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=video , return_tensors='''pt''' , padding=True )

    print('''Shape of pixel values:''' , inputs.pixel_values.shape )

    with torch.no_grad():
        outputs = model(**inputs )

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1 )
    print('''Probs:''' , probs )
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]] )
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] )
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]] )
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] )
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]] )
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] )
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]] )
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] )
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]] )
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] )
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] )
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] )
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] )
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]] )
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] )
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] )
    else:
        raise ValueError(f"Model name {model_name} not supported" )
    assert torch.allclose(probs , expected_probs , atol=1E-3 )
    print('''Looks ok!''' )

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        print('''Pushing model, processor and slow tokenizer files to the hub...''' )
        model.push_to_hub(model_name , organization='''nielsr''' )
        processor.push_to_hub(model_name , organization='''nielsr''' )
        slow_tokenizer.push_to_hub(model_name , organization='''nielsr''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 350 |
"""simple docstring"""
def __lowercase ( number ):
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
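    # Hand-checked examples for the parity helper above (illustrative): `number & 1`
    # isolates the lowest bit, which is 0 exactly for even integers.
    assert __lowercase(0) and __lowercase(-4) and not __lowercase(7)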
| 155 | 0 |
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup( params , i , prefix , layer_name="attention" ):
    """simple docstring"""
    k = params[f"""{prefix}/layers_{i}/{layer_name}/key/kernel"""]
    o = params[f"""{prefix}/layers_{i}/{layer_name}/out/kernel"""]
    q = params[f"""{prefix}/layers_{i}/{layer_name}/query/kernel"""]
    v = params[f"""{prefix}/layers_{i}/{layer_name}/value/kernel"""]
    return k, o, q, v
def tax_mlp_lookup( params , i , prefix , split_mlp_wi=False ):
    """simple docstring"""
    if split_mlp_wi:
        wi_0 = params[f"""{prefix}/layers_{i}/mlp/wi_0/kernel"""]
        wi_1 = params[f"""{prefix}/layers_{i}/mlp/wi_1/kernel"""]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"""{prefix}/layers_{i}/mlp/wi/kernel"""]
    wo = params[f"""{prefix}/layers_{i}/mlp/wo/kernel"""]
    return wi, wo
def tax_layer_norm_lookup( params , i , prefix , layer_name ):
    """simple docstring"""
    return params[f"""{prefix}/layers_{i}/{layer_name}/scale"""]
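# Illustrative shape of the flattened T5X parameter dict the lookups above expect
# (keys are examples, not taken from a real checkpoint):
# params["encoder/layers_0/attention/key/kernel"]
# params["encoder/layers_0/pre_attention_layer_norm/scale"]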
def convert_tax_to_pytorch( variables , *, num_layers , is_encoder_only ):
    """simple docstring"""
    old = traverse_util.flatten_dict(variables['target'] )
    old = {'/'.join(k ): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = 'encoder/layers_0/mlp/wi_0/kernel' in old
    print('Split MLP:' , split_mlp_wi )

    new = collections.OrderedDict()

    # Shared embeddings.
    new['shared.weight'] = old['token_embedder/embedding']

    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old , i , 'encoder' , 'pre_attention_layer_norm' )
        k , o , q , v = tax_attention_lookup(old , i , 'encoder' , 'attention' )
        new[f'encoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
        new[f'encoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old , i , 'encoder' , 'pre_mlp_layer_norm' )
        wi , wo = tax_mlp_lookup(old , i , 'encoder' , split_mlp_wi )
        new[f'encoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
        if split_mlp_wi:
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'] = wi[0].T
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'] = wi[1].T
        else:
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi.weight'] = wi.T
        new[f'encoder.block.{i}.layer.1.DenseReluDense.wo.weight'] = wo.T

    new['encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = old[
        'encoder/relpos_bias/rel_embedding'
    ].T
    new['encoder.final_layer_norm.weight'] = old['encoder/encoder_norm/scale']

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old , i , 'decoder' , 'pre_self_attention_layer_norm' )
            k , o , q , v = tax_attention_lookup(old , i , 'decoder' , 'self_attention' )
            new[f'decoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
            new[f'decoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old , i , 'decoder' , 'pre_cross_attention_layer_norm' )
            k , o , q , v = tax_attention_lookup(old , i , 'decoder' , 'encoder_decoder_attention' )
            new[f'decoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
            new[f'decoder.block.{i}.layer.1.EncDecAttention.k.weight'] = k.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.o.weight'] = o.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.q.weight'] = q.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.v.weight'] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old , i , 'decoder' , 'pre_mlp_layer_norm' )
            wi , wo = tax_mlp_lookup(old , i , 'decoder' , split_mlp_wi )
            new[f'decoder.block.{i}.layer.2.layer_norm.weight'] = layer_norm
            if split_mlp_wi:
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'] = wi[0].T
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'] = wi[1].T
            else:
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi.weight'] = wi.T
            new[f'decoder.block.{i}.layer.2.DenseReluDense.wo.weight'] = wo.T

        new['decoder.final_layer_norm.weight'] = old['decoder/decoder_norm/scale']
        new['decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = old[
            'decoder/relpos_bias/rel_embedding'
        ].T

    # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
    if "decoder/logits_dense/kernel" in old:
        new['lm_head.weight'] = old['decoder/logits_dense/kernel'].T

    return new
def make_state_dict( converted_params , is_encoder_only ):
    """simple docstring"""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['encoder.embed_tokens.weight'] = state_dict['shared.weight']

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['decoder.embed_tokens.weight'] = state_dict['shared.weight']

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('Using shared word embeddings as lm_head.' )
            state_dict['lm_head.weight'] = state_dict['shared.weight']

    return state_dict
def load_tax_weights_in_ta( model , config , tax_checkpoint_path , is_encoder_only ):
    """simple docstring"""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch( tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only = False ):
    """simple docstring"""
    config = TaConfig.from_json_file(config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config )
    else:
        model = TaForConditionalGeneration(config )

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only )

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print('Done' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
| 166 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig ( PretrainedConfig ):
    '''simple docstring'''

    model_type = """blenderbot-small"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self , vocab_size=5_0_2_6_5 , max_position_embeddings=5_1_2 , encoder_layers=8 , encoder_ffn_dim=2_0_4_8 , encoder_attention_heads=1_6 , decoder_layers=8 , decoder_ffn_dim=2_0_4_8 , decoder_attention_heads=1_6 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=5_1_2 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=1 , scale_embedding=False , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , forced_eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
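# Minimal usage sketch for the config above (relies on the defaults; illustrative):
#
#     config = BlenderbotSmallConfig()
#     assert config.hidden_size == config.d_model == 512  # resolved via `attribute_map`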
class BlenderbotSmallOnnxConfig ( OnnxSeq2SeqConfigWithPast ):
    '''simple docstring'''
    @property
    def inputs( self ):
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ])
            if self.use_past:
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs , direction='inputs')
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ])
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ])
        return common_inputs
    @property
    def outputs( self ):
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast , self).outputs
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm( self , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
        '''simple docstring'''
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , seq_length , is_pair , framework)

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , decoder_seq_length , is_pair , framework)
        decoder_inputs = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs , **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch , encoder_seq_length = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            num_encoder_attention_heads , num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs['decoder_attention_mask'] = torch.cat(
                [common_inputs['decoder_attention_mask'], torch.ones(batch , decoder_past_length)] , dim=1)

            common_inputs['past_key_values'] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers , num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers , num_decoder_layers)
            max_num_layers = max(num_encoder_layers , num_decoder_layers) - min_num_layers
            remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ))
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(min_num_layers , max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        '''simple docstring'''
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , seq_length , is_pair , framework)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch , seqlen = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers , _ = self.num_layers
            num_encoder_attention_heads , _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['attention_mask'].dtype
            common_inputs['attention_mask'] = torch.cat(
                [common_inputs['attention_mask'], torch.ones(batch , past_key_values_length , dtype=mask_dtype)] , dim=1)
            common_inputs['past_key_values'] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        '''simple docstring'''
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [' '.join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input , return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework)
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework)
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework)
        return common_inputs
    def _flatten_past_key_values_( self , flattened_output , name , idx , t):
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output , name , idx , t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast , self)._flatten_past_key_values_(
                flattened_output , name , idx , t)
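# Illustrative usage (a sketch with hypothetical names, not part of the original file;
# it assumes a loaded model config and tokenizer):
#   onnx_config = _UpperCamelCase(model.config, task="seq2seq-lm", use_past=True)
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
# `dummy` then holds encoder inputs, decoder inputs, and zero-filled past_key_values.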
| 166 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 369 |
"""simple docstring"""
def bfs(graph, s, t, parent) -> bool:
    # Standard BFS over the residual graph; records each visited node's parent
    # so the augmenting path can be reconstructed. Returns True if t is reachable.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
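# Choosing augmenting paths with BFS makes this the Edmonds-Karp variant of
# Ford-Fulkerson, which runs in O(V * E^2) regardless of the edge capacities.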
def ford_fulkerson(graph, source, sink) -> int:
    # This array is filled by BFS and stores the augmenting path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float('Inf')
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities of the edges and reverse edges
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
UpperCAmelCase__ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
UpperCAmelCase__ , UpperCAmelCase__ = 0, 5
print(ford_fulkerson(graph, source, sink))
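# For this classic example network the printed maximum flow is 23.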
| 40 | 0 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)
from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    """simple docstring"""
    input_paths_and_base_extractors = {
        '7z': (seven_zip_file, SevenZipExtractor),
        'bz2': (bz2_file, Bzip2Extractor),
        'gzip': (gz_file, GzipExtractor),
        'lz4': (lz4_file, Lz4Extractor),
        'tar': (tar_file, TarExtractor),
        'xz': (xz_file, XzExtractor),
        'zip': (zip_file, ZipExtractor),
        'zstd': (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ('extracted' if is_archive else 'extracted.txt')
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    """simple docstring"""
    input_paths = {
        '7z': seven_zip_file,
        'bz2': bz2_file,
        'gzip': gz_file,
        'lz4': lz4_file,
        'tar': tar_file,
        'xz': xz_file,
        'zip': zip_file,
        'zstd': zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ('extracted' if is_archive else 'extracted.txt')
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    """simple docstring"""
    import tarfile

    directory = tmp_path / 'data_dot_dot'
    directory.mkdir()
    path = directory / 'tar_file_with_dot_dot.tar'
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    """simple docstring"""
    import tarfile

    directory = tmp_path / 'data_sym_link'
    directory.mkdir()
    path = directory / 'tar_file_with_sym_link.tar'
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
"insecure_tar_file, error_log" , [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")] , )
def test_tar_extract_insecurely_fails(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    """simple docstring"""
    insecure_tar_files = {
        'tar_file_with_dot_dot': tar_file_with_dot_dot,
        'tar_file_with_sym_link': tar_file_with_sym_link,
    }
    input_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / 'extracted'
    TarExtractor.extract(input_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    """simple docstring"""
    not_a_zip_file = tmpdir / 'not_a_zip_file'
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'
        b'\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'
        b'DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'
        b'\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
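# zipfile.is_zipfile scans for the end-of-central-directory signature ("PK\x05\x06"),
# which this PNG payload happens to contain; checking the leading magic number
# instead avoids the false positive.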
| 227 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __snake_case ( PretrainedConfig ):
    model_type = "sew-d"
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , squeeze_factor=2 , max_position_embeddings=512 , position_buckets=256 , share_att_key=True , relative_attention=True , pos_att_type=("p2c", "c2p") , norm_rel_ebd="layer_norm" , hidden_act="gelu_python" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , initializer_range=0.02 , layer_norm_eps=1e-7 , feature_layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , **kwargs , ) -> int:
        '''simple docstring'''
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type )
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
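    # The property below gives the overall stride of the convolutional feature
    # encoder: the product of the conv strides is how many raw audio samples
    # collapse into a single frame of logits.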
    @property
    def inputs_to_logits_ratio( self ) -> List[Any]:
        '''simple docstring'''
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 348 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester :
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) -> Optional[int]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ) -> Union[str, Any]:
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_mbart_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ) -> List[str]:
        model = TFMBartModel(config=config ).get_decoder()
        input_ids = inputs_dict["""input_ids"""]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["""attention_mask"""][:1, :]
        head_mask = inputs_dict["""head_mask"""]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
) -> List[str]:
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
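# Padding positions are masked out of both attention masks; the first decoder
# position is always kept attendable so generation can start from the
# decoder start token.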
@require_tf
class TFMBartModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFMBartForConditionalGeneration,
            'feature-extraction': TFMBartModel,
            'summarization': TFMBartForConditionalGeneration,
            'text2text-generation': TFMBartForConditionalGeneration,
            'translation': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> int:
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False

    def setUp( self ) -> Optional[Any]:
        self.model_tester = TFMBartModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MBartConfig )

    def test_config( self ) -> Optional[int]:
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs( self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest ( unittest.TestCase ):
    src_text = [
        ' UN Chief Says There Is No Military Solution in Syria',
    ]
    expected_text = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
    ]
    model_name = 'facebook/mbart-large-en-ro'
    @cached_property
    def tokenizer( self ) -> Any:
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def model( self ) -> List[Any]:
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model
    def _assert_generated_batch_equal_expected( self , **tokenizer_kwargs ) -> Any:
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        self.assertListEqual(self.expected_text , generated_words )

    def translate_src_text( self , **tokenizer_kwargs ) -> Optional[Any]:
        model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , return_tensors="""tf""" )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
        generated_words = self.tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
        return generated_words
    @slow
    def test_batch_generation_en_ro( self ) -> List[Any]:
        self._assert_generated_batch_equal_expected()
| 352 |
'''simple docstring'''
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string ) + 1
    len_pattern = len(pattern ) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern )] for j in range(len_string )]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1 , len_string ):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1 , len_pattern ):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == """*""" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1 , len_string ):
        for j in range(1 , len_pattern ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
input_string = """aab"""
pattern = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'''{input_string} matches the given pattern {pattern}''')
else:
print(F'''{input_string} does not match with the given pattern {pattern}''')
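# Trace for the example above: "c*" matches the empty string, "a*" matches "aa",
# and "b" matches "b", so "aab" satisfies "c*a*b".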
| 338 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester (ConfigTester ):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config , 'tf_padding'))
        self.parent.assertTrue(hasattr(config , 'depth_multiplier'))
class MobileNetVaModelTester :
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=32 , depth_multiplier=0.25 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , tf_padding=True , hidden_act="relu6" , last_hidden_size=1280 , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs( self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels , pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
        result = model(pixel_values , labels=pixel_labels)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def prepare_config_and_inputs_for_common( self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MobileNetVaModel,
"""image-classification""": MobileNetVaForImageClassification,
"""image-segmentation""": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False)

    def test_config( self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='MobileNetV2 does not use inputs_embeds')
    def test_inputs_embeds( self):
        pass

    @unittest.skip(reason='MobileNetV2 does not support input and output embeddings')
    def test_model_common_attributes( self):
        pass

    @unittest.skip(reason='MobileNetV2 does not output attentions')
    def test_attention_outputs( self):
        pass
    def test_forward_signature( self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_model( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output( self):
        def check_hidden_states_output(inputs_dict , config , model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states) , expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class)
    def test_for_image_classification( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @slow
    def test_model_from_pretrained( self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest (unittest.TestCase ):
    @cached_property
    def default_image_processor( self):
        return (
            MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224') if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head( self):
        model = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([0.2_445, -1.1_993, 0.1_905]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
    @slow
    def test_inference_semantic_segmentation( self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')
        model = model.to(torch_device)
        image_processor = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape , expected_shape)
        expected_slice = torch.tensor(
            [
                [[17.5_790, 17.7_581, 18.3_355], [18.3_257, 18.4_230, 18.8_973], [18.6_169, 18.8_650, 19.2_187]],
                [[-2.1_595, -2.0_977, -2.3_741], [-2.4_226, -2.3_028, -2.6_835], [-2.7_819, -2.5_991, -2.7_706]],
                [[4.2_058, 4.8_317, 4.7_638], [4.4_136, 5.0_361, 4.9_383], [4.5_028, 4.9_644, 4.8_734]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1e-4))
| 214 |
def is_pangram( input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    '''simple docstring'''
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(' ' , '' )
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower() )
    return len(frequency ) == 26
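# A pangram uses every letter of the alphabet at least once, so collecting the
# distinct letters and comparing against 26 is both necessary and sufficient.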
def is_pangram_faster( input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    '''simple docstring'''
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char ) - ord("a" )] = True
        elif char.isupper():
            flag[ord(char ) - ord("A" )] = True
    return all(flag )
def is_pangram_fastest( input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    '''simple docstring'''
    return len({char for char in input_str.lower() if char.isalpha()} ) == 26
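# The "fastest" variant lowercases once and builds a single set of alphabetic
# characters in one pass, which is why it wins the benchmark below.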
def benchmark() -> None:
    '''simple docstring'''
    from timeit import timeit
    setup = 'from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'
    print(timeit('is_pangram()' , setup=setup ) )
    print(timeit('is_pangram_faster()' , setup=setup ) )
    print(timeit('is_pangram_fastest()' , setup=setup ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 214 | 1 |
"""simple docstring"""
class RadixNode :
    def __init__( self , prefix: str = "" , is_leaf: bool = False ):
        '''simple docstring'''
        # Mapping from the first character of an edge label to the child node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match( self , word: str ):
        '''simple docstring'''
        x = 0
        for q, w in zip(self.prefix , word ):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
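    # match() three-way-splits a word against this node's prefix into
    # (common part, leftover of the prefix, leftover of the word);
    # insert/find/delete below are all expressed in terms of that split.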
    def insert_many( self , words: list[str] ):
        '''simple docstring'''
        for word in words:
            self.insert(word )

    def insert( self , word: str ):
        '''simple docstring'''
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word , is_leaf=True )
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word )
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string , False )
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word )
    def find( self , word: str ):
        '''simple docstring'''
        incoming_node = self.nodes.get(word[0] , None )
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word )
    def delete( self , word: str ):
        '''simple docstring'''
        incoming_node = self.nodes.get(word[0] , None )
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word )
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes ) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes ) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values() )[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes ) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values() )[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
            return True
    def print_tree( self , height: int = 0 ):
        '''simple docstring'''
        if self.prefix != "":
            print('-' * height , self.prefix , '  (leaf)' if self.is_leaf else '' )
        for value in self.nodes.values():
            value.print_tree(height + 1 )
def test_trie():
    '''simple docstring'''
    words = 'banana bananas bandana band apple all beast'.split()
    root = RadixNode()
    root.insert_many(words )
    assert all(root.find(word ) for word in words )
    assert not root.find('bandanas' )
    assert not root.find('apps' )
    root.delete('all' )
    assert not root.find('all' )
    root.delete('banana' )
    assert not root.find('banana' )
    assert root.find('bananas' )
    return True


def pytests():
    '''simple docstring'''
    assert test_trie()


def main():
    '''simple docstring'''
    root = RadixNode()
    words = 'banana bananas bandanas bandana band apple all beast'.split()
    root.insert_many(words )
    print('Words:' , words )
    print('Tree:' )
    root.print_tree()
if __name__ == "__main__":
main()
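# Example: inserting "test" then "tester" stores "er" under the "test" leaf;
# inserting "team" afterwards splits the edge into "te" -> ("st", "am").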
| 359 |
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester ( ConfigTester ):
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , 'hidden_sizes' ) )
        self.parent.assertTrue(hasattr(config , 'num_attention_heads' ) )
        self.parent.assertTrue(hasattr(config , 'num_encoder_blocks' ) )
class SegformerModelTester :
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[16, 32, 64, 128] , downsampling_rates=[1, 4, 8, 16] , num_attention_heads=[1, 2, 4, 8] , is_training=True , use_labels=True , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , num_labels=3 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = SegformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
    def create_and_check_for_image_segmentation( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        self.parent.assertGreater(result.loss , 0.0 )
    def create_and_check_for_binary_image_segmentation( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        labels = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(torch_device )
        result = model(pixel_values , labels=labels )
        self.parent.assertGreater(result.loss , 0.0 )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': SegformerModel,
            'image-classification': SegformerForImageClassification,
            'image-segmentation': SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = SegformerModelTester(self )
        self.config_tester = SegformerConfigTester(self , config_class=SegformerConfig )

    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_binary_image_segmentation( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs )

    def test_for_image_segmentation( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs )
    @unittest.skip('SegFormer does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        '''simple docstring'''
        pass

    @unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' )
    def test_model_common_attributes( self ):
        '''simple docstring'''
        pass
    def test_forward_signature( self ):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
a, a : Any = self.model_tester.prepare_config_and_inputs_for_common()
a : Any = True
for model_class in self.all_model_classes:
a : Optional[Any] = True
a : Tuple = False
a : int = True
a : Any = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
a : Dict = model(**self._prepare_for_class(A , A ) )
a : Union[str, Any] = outputs.attentions
a : Tuple = sum(self.model_tester.depths )
self.assertEqual(len(A ) , A )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a : Tuple = True
a : Optional[Any] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
a : str = model(**self._prepare_for_class(A , A ) )
a : Optional[int] = outputs.attentions
self.assertEqual(len(A ) , A )
# verify the first attentions (first block, first layer)
a : Union[str, Any] = (self.model_tester.image_size // 4) ** 2
a : List[str] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
a : Tuple = (self.model_tester.image_size // 3_2) ** 2
a : Tuple = (self.model_tester.image_size // (3_2 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
a : str = len(A )
# Check attention is always last and order is fine
a : str = True
a : Tuple = True
a : List[str] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
a : Dict = model(**self._prepare_for_class(A , A ) )
self.assertEqual(out_len + 1 , len(A ) )
a : str = outputs.attentions
self.assertEqual(len(A ) , A )
# verify the first attentions (first block, first layer)
a : Union[str, Any] = (self.model_tester.image_size // 4) ** 2
a : Optional[int] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
def check_hidden_states_output(A : Optional[Any] , A : List[str] , A : Union[str, Any] ):
a : Optional[Any] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
a : Optional[Any] = model(**self._prepare_for_class(A , A ) )
a : Tuple = outputs.hidden_states
a : Optional[Any] = self.model_tester.num_encoder_blocks
self.assertEqual(len(A ) , A )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
a, a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : List[str] = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a : str = True
check_hidden_states_output(A , A , A )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
a, a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
a : List[Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(A ):
continue
a : List[Any] = model_class(A )
model.to(A )
model.train()
a : Tuple = self._prepare_for_class(A , A , return_labels=A )
a : Any = model(**A ).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
pass
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
class SegformerModelIntegrationTest ( unittest.TestCase ):
@slow
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
a : int = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=A , align=A , do_random_crop=A )
a : Dict = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
A )
a : str = prepare_img()
a : List[str] = image_processor(images=A , return_tensors='pt' )
a : List[str] = encoded_inputs.pixel_values.to(A )
with torch.no_grad():
a : Optional[int] = model(A )
a : Any = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
self.assertEqual(outputs.logits.shape , A )
a : str = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , A , atol=1E-4 ) )
@slow
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
a : Optional[Any] = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=A , align=A , do_random_crop=A )
a : Optional[Any] = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(A )
a : List[Any] = prepare_img()
a : Optional[Any] = image_processor(images=A , return_tensors='pt' )
a : int = encoded_inputs.pixel_values.to(A )
with torch.no_grad():
a : Optional[Any] = model(A )
a : Tuple = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
self.assertEqual(outputs.logits.shape , A )
a : Optional[Any] = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , A , atol=1E-1 ) )
@slow
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
a : str = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=A , align=A , do_random_crop=A )
a : Optional[int] = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
A )
a : int = prepare_img()
a : Any = image_processor(images=A , return_tensors='pt' )
a : List[Any] = encoded_inputs.pixel_values.to(A )
with torch.no_grad():
a : str = model(A )
a : str = outputs.logits.detach().cpu()
a : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=A , target_sizes=[(5_0_0, 3_0_0)] )
a : Dict = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , A )
a : int = image_processor.post_process_semantic_segmentation(outputs=A )
a : Any = torch.Size((1_2_8, 1_2_8) )
self.assertEqual(segmentation[0].shape , A )
| 186 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
snake_case_ = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_["modeling_longt5"] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_["modeling_flax_longt5"] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longt5 import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongT5EncoderModel,
LongT5ForConditionalGeneration,
LongT5Model,
LongT5PreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longt5 import (
FlaxLongT5ForConditionalGeneration,
FlaxLongT5Model,
FlaxLongT5PreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], snake_case_, module_spec=__spec__)
| 78 |
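The _LazyModule pattern above defers the heavyweight torch and flax imports until a symbol is actually touched. A simplified sketch of the mechanism (a hypothetical MiniLazyModule, not the transformers class itself):

import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr: str):
        # only called when normal attribute lookup fails
        if attr in self._symbol_to_module:
            submodule = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
            value = getattr(submodule, attr)
            setattr(self, attr, value)  # cache so __getattr__ is not hit again
            return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")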
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    '''simple docstring'''
    octets = [int(i) for i in ip_va_address.split('''.''') if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 254 for octet in octets)
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = 'valid' if is_ip_va_address_valid(ip) else 'invalid'
    print(f"""{ip} is a {valid_or_invalid} IP v4 address.""")
| 129 | 0 |
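For comparison, the standard library can perform the same validation; a short sketch using the ipaddress module (note it accepts 255 in any octet, unlike the 254 upper bound above):

import ipaddress

def is_ipv4(address: str) -> bool:
    try:
        ipaddress.IPv4Address(address)
        return True
    except ValueError:
        return False

print(is_ipv4('192.168.0.254'))  # True
print(is_ipv4('256.1.1.1'))      # False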
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowercase__ : Optional[Any] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class a__ :
config_cls = PegasusConfig
config_updates = {}
hidden_act = """gelu"""
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) -> str:
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
a = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
a = np.concatenate([input_ids, eos_tensor] , axis=1 )
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
a = prepare_pegasus_inputs_dict(A , A , A )
return config, inputs_dict
def lowerCAmelCase_ ( self , A , A , A ) -> Dict:
'''simple docstring'''
a = 20
a = model_class_name(A )
a = model.encode(inputs_dict["input_ids"] )
decoder_input_ids , decoder_attention_mask = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
a = model.init_cache(decoder_input_ids.shape[0] , A , A )
a = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
a = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
a = model.decode(
decoder_input_ids[:, :-1] , A , decoder_attention_mask=A , past_key_values=A , decoder_position_ids=A , )
a = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
a = model.decode(
decoder_input_ids[:, -1:] , A , decoder_attention_mask=A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=A , )
a = model.decode(A , A )
a = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' )
def lowerCAmelCase_ ( self , A , A , A ) -> Dict:
'''simple docstring'''
a = 20
a = model_class_name(A )
a = model.encode(inputs_dict["input_ids"] )
decoder_input_ids , decoder_attention_mask = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
a = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
a = model.init_cache(decoder_input_ids.shape[0] , A , A )
a = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
a = model.decode(
decoder_input_ids[:, :-1] , A , decoder_attention_mask=A , past_key_values=A , decoder_position_ids=A , )
a = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
a = model.decode(
decoder_input_ids[:, -1:] , A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=A , decoder_position_ids=A , )
a = model.decode(A , A , decoder_attention_mask=A )
a = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' )
def prepare_pegasus_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , ):
if attention_mask is None:
attention_mask = np.not_equal(input_ids , config.pad_token_id).astype(np.int8)
if decoder_attention_mask is None:
decoder_attention_mask = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id).astype(np.int8),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class a__ ( FlaxModelTesterMixin , unittest.TestCase ):
all_model_classes = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
is_encoder_decoder = True
test_pruning = False
test_head_masking = False
test_onnx = False
def lowerCAmelCase_ ( self ) -> Any:
'''simple docstring'''
a = FlaxPegasusModelTester(self )
a = ConfigTester(self , config_class=A )
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(A , A , A )
def lowerCAmelCase_ ( self ) -> Dict:
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(A , A , A )
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
a = self._prepare_for_class(A , A )
a = model_class(A )
@jax.jit
def encode_jitted(A , A=None , **A ):
return model.encode(input_ids=A , attention_mask=A )
with self.subTest("JIT Enabled" ):
a = encode_jitted(**A ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
a = encode_jitted(**A ).to_tuple()
self.assertEqual(len(A ) , len(A ) )
for jitted_output, output in zip(A , A ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
a = model_class(A )
a = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
a = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(A , A , A ):
return model.decode(
decoder_input_ids=A , decoder_attention_mask=A , encoder_outputs=A , )
with self.subTest("JIT Enabled" ):
a = decode_jitted(**A ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
a = decode_jitted(**A ).to_tuple()
self.assertEqual(len(A ) , len(A ) )
for jitted_output, output in zip(A , A ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
a = model_class_name.from_pretrained("google/pegasus-large" , from_pt=A )
a = np.ones((1, 1) )
a = model(A )
self.assertIsNotNone(A )
@slow
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
a = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
a = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
a = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
a = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
a = tokenizer(A , return_tensors="np" , truncation=A , max_length=512 , padding=A )
a = model.generate(**A , num_beams=2 ).sequences
a = tokenizer.batch_decode(A , skip_special_tokens=A )
assert tgt_text == decoded
| 180 |
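The JIT-enabled/JIT-disabled subtests above rely on jax.jit being transparent: the traced function must return the same shapes and values as eager execution. A minimal standalone check of that property (toy function, not the Pegasus model):

import jax
import jax.numpy as jnp

@jax.jit
def f(x):
    return jnp.tanh(x) * 2.0

x = jnp.ones((2, 3))
jitted = f(x)
with jax.disable_jit():
    eager = f(x)
assert jitted.shape == eager.shape
assert jnp.allclose(jitted, eager)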
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ : Optional[Any] = logging.get_logger(__name__)
lowercase__ : str = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class a__ ( PretrainedConfig ):
model_type = """table-transformer"""
keys_to_ignore_at_inference = ["""past_key_values"""]
attribute_map = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ) -> List[str]:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(backbone_config , dict ):
backbone_model_type = backbone_config.get("model_type" )
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config )
# set timm attributes to None
dilation , backbone , use_pretrained_backbone = None, None, None
self.use_timm_backbone = use_timm_backbone
self.backbone_config = backbone_config
self.num_channels = num_channels
self.num_queries = num_queries
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.init_xavier_std = init_xavier_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.num_hidden_layers = encoder_layers
self.auxiliary_loss = auxiliary_loss
self.position_embedding_type = position_embedding_type
self.backbone = backbone
self.use_pretrained_backbone = use_pretrained_backbone
self.dilation = dilation
# Hungarian matcher
self.class_cost = class_cost
self.bbox_cost = bbox_cost
self.giou_cost = giou_cost
# Loss coefficients
self.mask_loss_coefficient = mask_loss_coefficient
self.dice_loss_coefficient = dice_loss_coefficient
self.bbox_loss_coefficient = bbox_loss_coefficient
self.giou_loss_coefficient = giou_loss_coefficient
self.eos_coefficient = eos_coefficient
super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
return self.d_model
class a__ ( OnnxConfig ):
torch_onnx_minimum_version = version.parse("""1.11""" )
@property
def lowerCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def lowerCAmelCase_ ( self ) -> float:
'''simple docstring'''
return 1e-5
@property
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
return 12
| 180 | 1 |
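The attribute_map above lets callers read hidden_size while the config stores d_model. A tiny standalone sketch of how such aliasing can work (hypothetical AliasedConfig, not the PretrainedConfig implementation):

class AliasedConfig:
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads"}

    def __init__(self):
        self.d_model = 256
        self.encoder_attention_heads = 8

    def __getattr__(self, name):
        # only invoked when normal lookup fails, so real attributes are untouched
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)

cfg = AliasedConfig()
print(cfg.hidden_size, cfg.num_attention_heads)  # 256 8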
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a ( datasets.Metric ):
def _info( self ) -> MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , )
def _compute( self , predictions , references , min_len = 1 , max_len = 4 , ) -> Dict[str, float]:
'''simple docstring'''
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
}
| 196 |
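Stripped of the datasets wrapper, the computation is a single call into NLTK. A short usage sketch with tokenized toy sentences:

from nltk.translate import gleu_score

hypothesis = 'the cat sat on the mat'.split()
reference = 'the cat is on the mat'.split()
score = gleu_score.corpus_gleu(
    list_of_references=[[reference]], hypotheses=[hypothesis], min_len=1, max_len=4
)
print(round(score, 2))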
from __future__ import annotations
def binary_search( a_list: list , item ) -> bool:
    if len(a_list ) == 0:
        return False
    midpoint = len(a_list ) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint] , item )
    else:
        return binary_search(a_list[midpoint + 1 :] , item )
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by comma:\n''').strip()
    sequence = [int(item.strip()) for item in user_input.split(''',''')]
    target = int(input('''Enter the number to be found in the list:\n''').strip())
    not_str = '''''' if binary_search(sequence, target) else '''not '''
    print(F'''{target} was {not_str}found in {sequence}''')
| 196 | 1 |
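The same membership test can lean on the standard library's bisect module, which avoids the list slicing above (each slice copies O(n) elements). A brief sketch:

from bisect import bisect_left

def contains(sorted_items: list, target) -> bool:
    index = bisect_left(sorted_items, target)
    return index < len(sorted_items) and sorted_items[index] == target

print(contains([1, 3, 5, 7], 5))  # True
print(contains([1, 3, 5, 7], 4))  # False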
from math import pow
def backtrack( needed_sum: int , power: int , current_number: int , current_sum: int , solutions_count: int , ):
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count
    i_to_n = int(pow(current_number , power ) )
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum , solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum , solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
    return current_sum, solutions_count
def solve( needed_sum: int , power: int ) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            'Invalid input\n'
            'needed_sum must be between 1 and 1000, power between 2 and 10.' )
    return backtrack(needed_sum , power , 1 , 0 , 0 )[1]  # Return the solutions_count
if __name__ == "__main__":
    import doctest
    doctest.testmod()
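A quick usage check of the backtracking solver (assuming the restored names above; 100 = 10**2 = 6**2 + 8**2 = 1 + 9 + 16 + 25 + 49, so there are three ways to write 100 as a sum of distinct squares):

print(solve(10, 2))   # 1  (10 = 1**2 + 3**2)
print(solve(100, 2))  # 3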
| 50 |
import base64
def baseaa_encode( string: str ) -> bytes:
    return base64.b64encode(string.encode('utf-8' ) )
def baseaa_decode( encoded: bytes ) -> str:
    return base64.b64decode(encoded ).decode('utf-8' )
if __name__ == "__main__":
    test = 'Hello World!'
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
    print(decoded)
| 50 | 1 |
'''simple docstring'''
from PIL import Image
def change_contrast( img , level: int ):
    factor = (2_5_9 * (level + 2_5_5)) / (2_5_5 * (2_5_9 - level))
    def contrast( c: int ) -> int:
        return int(1_2_8 + factor * (c - 1_2_8) )
    return img.point(contrast )
if __name__ == "__main__":
    # Load image
    with Image.open('image_data/lena.jpg') as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 1_70)
        cont_img.save('image_data/lena_high_contrast.png', format='png')
| 163 |
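The same point transform ports directly to NumPy arrays; a hedged sketch (explicit clipping added, since the float math can leave the 0-255 range):

import numpy as np

def change_contrast_np(pixels: np.ndarray, level: int) -> np.ndarray:
    factor = (259 * (level + 255)) / (255 * (259 - level))
    shifted = 128 + factor * (pixels.astype(np.float64) - 128)
    return np.clip(shifted, 0, 255).astype(np.uint8)

print(change_contrast_np(np.array([[0, 128, 255]], dtype=np.uint8), 170))  # [[  0 128 255]]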
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class _snake_case :
def __init__( self , parent , batch_size=13 , patch_size=2 , max_length=24 , num_mel_bins=16 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , frequency_stride=2 , time_stride=2 , ):
self.parent = parent
self.batch_size = batch_size
self.patch_size = patch_size
self.max_length = max_length
self.num_mel_bins = num_mel_bins
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.scope = scope
self.frequency_stride = frequency_stride
self.time_stride = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
num_patches = frequency_out_dimension * time_out_dimension
self.seq_length = num_patches + 2
def snake_case__ ( self):
input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
config = self.get_config()
return config, input_values, labels
def snake_case__ ( self):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def snake_case__ ( self , config , input_values , labels):
model = ASTModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_values)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_values, labels = config_and_inputs
inputs_dict = {"""input_values""": input_values}
return config, inputs_dict
@require_torch
class _snake_case ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel}
if is_torch_available()
else {}
)
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = ASTModelTester(self)
UpperCAmelCase__ : List[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37)
def snake_case__ ( self):
self.config_tester.run_common_tests()
@unittest.skip(reason="""AST does not use inputs_embeds""")
def snake_case__ ( self):
pass
def snake_case__ ( self):
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Any = model_class(_lowerCamelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
UpperCAmelCase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear))
def snake_case__ ( self):
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Union[str, Any] = model_class(_lowerCamelCase)
UpperCAmelCase__ : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Optional[int] = [*signature.parameters.keys()]
UpperCAmelCase__ : Tuple = ["""input_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase)
@slow
def snake_case__ ( self):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Optional[Any] = ASTModel.from_pretrained(_lowerCamelCase)
self.assertIsNotNone(_lowerCamelCase)
def _UpperCamelCase ( ):
UpperCAmelCase__ : Dict = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" )
UpperCAmelCase__ , UpperCAmelCase__ : int = torchaudio.load(UpperCamelCase__ )
return audio, sampling_rate
@require_torch
@require_torchaudio
class _snake_case ( unittest.TestCase ):
@cached_property
def snake_case__ ( self):
return (
ASTFeatureExtractor.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""")
if is_torchaudio_available()
else None
)
@slow
def snake_case__ ( self):
feature_extractor = self.default_feature_extractor
model = ASTForAudioClassification.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""").to(torch_device)
audio, sampling_rate = prepare_audio()
audio = audio.squeeze().numpy()
inputs = feature_extractor(audio , sampling_rate=sampling_rate , return_tensors="""pt""").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 527))
self.assertEqual(outputs.logits.shape , expected_shape)
expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
| 163 | 1 |
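The sequence length asserted in these tests comes from the patch-grid arithmetic in the tester. A worked check with the tester's defaults (patch_size=2, max_length=24, num_mel_bins=16, both strides 2):

patch_size, max_length, num_mel_bins = 2, 24, 16
frequency_stride = time_stride = 2

frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1  # 8
time_out = (max_length - patch_size) // time_stride + 1              # 12
num_patches = frequency_out * time_out                               # 96
seq_length = num_patches + 2                                         # 98: patches + [CLS] + distillation token
print(frequency_out, time_out, num_patches, seq_length)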
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __A( OnnxPipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
hub_checkpoint = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=0 ):
UpperCamelCase__ = np.random.RandomState(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase_ (self ):
UpperCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
UpperCamelCase__ = self.get_dummy_inputs()
UpperCamelCase__ = pipe(**UpperCAmelCase_ ).images
UpperCamelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
UpperCamelCase__ = np.array([0.6_5072, 0.5_8492, 0.4_8219, 0.5_5521, 0.5_3180, 0.5_5939, 0.5_0697, 0.3_9800, 0.4_6455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ (self ):
UpperCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
UpperCamelCase__ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
UpperCamelCase__ = self.get_dummy_inputs()
UpperCamelCase__ = pipe(**UpperCAmelCase_ ).images
UpperCamelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
UpperCamelCase__ = np.array([0.6_5863, 0.5_9425, 0.4_9326, 0.5_6313, 0.5_3875, 0.5_6627, 0.5_1065, 0.3_9777, 0.4_6330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ (self ):
UpperCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
UpperCamelCase__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
UpperCamelCase__ = self.get_dummy_inputs()
UpperCamelCase__ = pipe(**UpperCAmelCase_ ).images
UpperCamelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
UpperCamelCase__ = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ (self ):
UpperCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
UpperCamelCase__ = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
UpperCamelCase__ = self.get_dummy_inputs()
UpperCamelCase__ = pipe(**UpperCAmelCase_ ).images
UpperCamelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
UpperCamelCase__ = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ (self ):
UpperCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
UpperCamelCase__ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
UpperCamelCase__ = self.get_dummy_inputs()
UpperCamelCase__ = pipe(**UpperCAmelCase_ ).images
UpperCamelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
UpperCamelCase__ = np.array([0.5_3817, 0.6_0812, 0.4_7384, 0.4_9530, 0.5_1894, 0.4_9814, 0.4_7984, 0.3_8958, 0.4_4271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ (self ):
UpperCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
UpperCamelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
UpperCamelCase__ = self.get_dummy_inputs()
UpperCamelCase__ = pipe(**UpperCAmelCase_ ).images
UpperCamelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
UpperCamelCase__ = np.array([0.5_3895, 0.6_0808, 0.4_7933, 0.4_9608, 0.5_1886, 0.4_9950, 0.4_8053, 0.3_8957, 0.4_4200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ (self ):
UpperCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
UpperCamelCase__ = self.get_dummy_inputs()
UpperCamelCase__ = 3 * [inputs["prompt"]]
# forward
UpperCamelCase__ = pipe(**UpperCAmelCase_ )
UpperCamelCase__ = output.images[0, -3:, -3:, -1]
UpperCamelCase__ = self.get_dummy_inputs()
UpperCamelCase__ = 3 * [inputs.pop("""prompt""" )]
UpperCamelCase__ = pipe.tokenizer(
UpperCAmelCase_ , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=UpperCAmelCase_ , return_tensors="""np""" , )
UpperCamelCase__ = text_inputs["input_ids"]
UpperCamelCase__ = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
UpperCamelCase__ = prompt_embeds
# forward
UpperCamelCase__ = pipe(**UpperCAmelCase_ )
UpperCamelCase__ = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
def UpperCAmelCase_ (self ):
UpperCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
UpperCamelCase__ = self.get_dummy_inputs()
UpperCamelCase__ = 3 * ["this is a negative prompt"]
UpperCamelCase__ = negative_prompt
UpperCamelCase__ = 3 * [inputs["prompt"]]
# forward
UpperCamelCase__ = pipe(**UpperCAmelCase_ )
UpperCamelCase__ = output.images[0, -3:, -3:, -1]
UpperCamelCase__ = self.get_dummy_inputs()
UpperCamelCase__ = 3 * [inputs.pop("""prompt""" )]
UpperCamelCase__ = []
for p in [prompt, negative_prompt]:
UpperCamelCase__ = pipe.tokenizer(
UpperCAmelCase_ , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=UpperCAmelCase_ , return_tensors="""np""" , )
UpperCamelCase__ = text_inputs["input_ids"]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
UpperCamelCase__ = embeds
# forward
UpperCamelCase__ = pipe(**UpperCAmelCase_ )
UpperCamelCase__ = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class __A( unittest.TestCase ):
"""simple docstring"""
@property
def UpperCAmelCase_ (self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase_ (self ):
UpperCamelCase__ = ort.SessionOptions()
UpperCamelCase__ = False
return options
def UpperCAmelCase_ (self ):
UpperCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
UpperCamelCase__ = "A painting of a squirrel eating a burger"
np.random.seed(0 )
UpperCamelCase__ = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
UpperCamelCase__ = output.images
UpperCamelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase_ (self ):
UpperCamelCase__ = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
UpperCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
UpperCamelCase__ = "open neural network exchange"
UpperCamelCase__ = np.random.RandomState(0 )
UpperCamelCase__ = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCAmelCase_ , output_type="""np""" )
UpperCamelCase__ = output.images
UpperCamelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase_ (self ):
UpperCamelCase__ = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
UpperCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
UpperCamelCase__ = "open neural network exchange"
UpperCamelCase__ = np.random.RandomState(0 )
UpperCamelCase__ = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCAmelCase_ , output_type="""np""" )
UpperCamelCase__ = output.images
UpperCamelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase_ (self ):
UpperCamelCase__ = 0
def test_callback_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
UpperCamelCase__ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
UpperCamelCase__ = latents[0, -3:, -3:, -1]
UpperCamelCase__ = np.array(
[-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
UpperCamelCase__ = latents[0, -3:, -3:, -1]
UpperCamelCase__ = np.array(
[-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
UpperCamelCase__ = False
UpperCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
UpperCamelCase__ = "Andromeda galaxy in a bottle"
UpperCamelCase__ = np.random.RandomState(0 )
pipe(
prompt=UpperCAmelCase_ , num_inference_steps=5 , guidance_scale=7.5 , generator=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def UpperCAmelCase_ (self ):
UpperCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
assert pipe.safety_checker is None
UpperCamelCase__ = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase_ )
UpperCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained(UpperCAmelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCamelCase__ = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
| 365 |
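Outside the test harness, the prompt_embeds path exercised above looks roughly like this (hedged sketch; model id and revision are taken from the tests, and the pipeline download is required):

import numpy as np
from diffusers import OnnxStableDiffusionPipeline

pipe = OnnxStableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
)
# encode the prompt once, then pass embeddings instead of raw text
text_inputs = pipe.tokenizer(
    ["a photo of an astronaut"],
    padding="max_length",
    max_length=pipe.tokenizer.model_max_length,
    truncation=True,
    return_tensors="np",
)
prompt_embeds = pipe.text_encoder(input_ids=text_inputs.input_ids.astype(np.int32))[0]
image = pipe(prompt_embeds=prompt_embeds, num_inference_steps=2).images[0]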
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class __A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ (self ):
self.tmpdirname = tempfile.mkdtemp()
image_processor = BlipImageProcessor()
tokenizer = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
qformer_tokenizer = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
processor = InstructBlipProcessor(image_processor , tokenizer , qformer_tokenizer )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ).tokenizer
def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ).image_processor
def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ).qformer_tokenizer
def UpperCAmelCase_ (self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
UpperCamelCase__ = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ (self ):
UpperCamelCase__ = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase__ = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
UpperCamelCase__ = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(processor.qformer_tokenizer , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_qformer_tokenizer()
UpperCamelCase__ = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ , qformer_tokenizer=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""np""" )
UpperCamelCase__ = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_qformer_tokenizer()
UpperCamelCase__ = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ , qformer_tokenizer=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = """lower newer"""
UpperCamelCase__ = processor(text=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = qformer_tokenizer(SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_qformer_tokenizer()
UpperCamelCase__ = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ , qformer_tokenizer=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = """lower newer"""
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
processor()
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_qformer_tokenizer()
UpperCamelCase__ = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ , qformer_tokenizer=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_qformer_tokenizer()
UpperCamelCase__ = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ , qformer_tokenizer=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = """lower newer"""
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
| 178 | 0 |
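For reference, a hedged end-to-end usage sketch of the processor tested above (the checkpoint id is illustrative; any InstructBLIP checkpoint should expose the same five keys):

import numpy as np
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
image = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
inputs = processor(images=image, text="What is in the image?", return_tensors="pt")
print(sorted(inputs.keys()))
# ['attention_mask', 'input_ids', 'pixel_values', 'qformer_attention_mask', 'qformer_input_ids']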
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
_a = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Optional[Any]:
"""simple docstring"""
if got_ver is None or want_ver is None:
raise ValueError(
F"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
F""" reinstalling {pkg}.""" )
if not ops[op](version.parse(__lowerCAmelCase ) , version.parse(__lowerCAmelCase ) ):
raise ImportError(
F"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def __A ( __lowerCAmelCase , __lowerCAmelCase = None )-> None:
"""simple docstring"""
_UpperCAmelCase = F"""\n{hint}""" if hint is not None else ''
# non-versioned check
if re.match(R'^[\w_\-\d]+$' , __lowerCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = requirement, None, None
else:
_UpperCAmelCase = re.findall(R'^([^!=<>\s]+)([\s!=<>]{1,2}.+)' , __lowerCAmelCase )
if not match:
raise ValueError(
'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'
F""" got {requirement}""" )
_UpperCAmelCase , _UpperCAmelCase = match[0]
_UpperCAmelCase = want_full.split(',' ) # there could be multiple requirements
_UpperCAmelCase = {}
for w in want_range:
_UpperCAmelCase = re.findall(R'^([\s!=<>]{1,2})(.+)' , __lowerCAmelCase )
if not match:
raise ValueError(
'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'
F""" but got {requirement}""" )
_UpperCAmelCase , _UpperCAmelCase = match[0]
_UpperCAmelCase = want_ver
if op not in ops:
raise ValueError(F"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
# special case
if pkg == "python":
_UpperCAmelCase = '.'.join([str(__lowerCAmelCase ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return
# check if any version is installed
try:
_UpperCAmelCase = importlib.metadata.version(__lowerCAmelCase )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
F"""The '{requirement}' distribution was not found and is required by this application. {hint}""" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def __A ( __lowerCAmelCase )-> Tuple:
"""simple docstring"""
_UpperCAmelCase = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
return require_version(__lowerCAmelCase , __lowerCAmelCase )
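# Illustrative usage sketch (grounded in the `require_version` call just above;
# the requirement strings below are demo assumptions, not pinned dependencies):
if __name__ == "__main__":
    require_version("packaging>=20.0")     # single bound
    require_version("numpy>=1.17,<2.0")    # multiple comma-separated bounds
    require_version("python>=3.7")         # special-cased against sys.version_info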
| 39 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase ={
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67 | 0 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 353 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowercase_ = TypeVar("""KEY""")
lowercase_ = TypeVar("""VAL""")
@dataclass(frozen=UpperCAmelCase , slots=UpperCAmelCase )
class SCREAMING_SNAKE_CASE (Generic[KEY, VAL] ):
_UpperCamelCase : KEY
_UpperCamelCase : VAL
class SCREAMING_SNAKE_CASE (_Item ):
def __init__( self : Optional[int] )-> None:
"""simple docstring"""
super().__init__(a , a )
def __bool__( self : str )-> bool:
"""simple docstring"""
return False
lowercase_ = _DeletedItem()
class SCREAMING_SNAKE_CASE (MutableMapping[KEY, VAL] ):
def __init__( self : Tuple , a : int = 8 , a : float = 0.75 )-> None:
"""simple docstring"""
lowercase__ = initial_block_size
lowercase__ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowercase__ = capacity_factor
lowercase__ = 0
def SCREAMING_SNAKE_CASE_ ( self : Any , a : KEY )-> int:
"""simple docstring"""
return hash(a ) % len(self._buckets )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : int )-> int:
"""simple docstring"""
return (ind + 1) % len(self._buckets )
def SCREAMING_SNAKE_CASE_ ( self : Any , a : int , a : KEY , a : VAL )-> bool:
"""simple docstring"""
lowercase__ = self._buckets[ind]
if not stored:
lowercase__ = _Item(a , a )
self._len += 1
return True
elif stored.key == key:
lowercase__ = _Item(a , a )
return True
else:
return False
def SCREAMING_SNAKE_CASE_ ( self : str )-> bool:
"""simple docstring"""
lowercase__ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(a )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> bool:
"""simple docstring"""
if len(self._buckets ) <= self._initial_block_size:
return False
lowercase__ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : int )-> None:
"""simple docstring"""
lowercase__ = self._buckets
lowercase__ = [None] * new_size
lowercase__ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> None:
"""simple docstring"""
self._resize(len(self._buckets ) * 2 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> None:
"""simple docstring"""
self._resize(len(self._buckets ) // 2 )
def SCREAMING_SNAKE_CASE_ ( self : Any , a : KEY )-> Iterator[int]:
"""simple docstring"""
lowercase__ = self._get_bucket_index(a )
for _ in range(len(self._buckets ) ):
yield ind
lowercase__ = self._get_next_ind(a )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : KEY , a : VAL )-> None:
"""simple docstring"""
for ind in self._iterate_buckets(a ):
if self._try_set(a , a , a ):
break
def __setitem__( self : List[Any] , a : KEY , a : VAL )-> None:
"""simple docstring"""
if self._is_full():
self._size_up()
self._add_item(a , a )
def __delitem__( self : str , a : KEY )-> None:
"""simple docstring"""
for ind in self._iterate_buckets(a ):
lowercase__ = self._buckets[ind]
if item is None:
raise KeyError(a )
if item is _deleted:
continue
if item.key == key:
lowercase__ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : List[str] , a : KEY )-> VAL:
"""simple docstring"""
for ind in self._iterate_buckets(a ):
lowercase__ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(a )
def __len__( self : Tuple )-> int:
"""simple docstring"""
return self._len
def __iter__( self : int )-> Iterator[KEY]:
"""simple docstring"""
yield from (item.key for item in self._buckets if item)
def __repr__( self : Union[str, Any] )-> str:
"""simple docstring"""
lowercase__ = ' ,'.join(
f"""{item.key}: {item.val}""" for item in self._buckets if item )
return f"""HashMap({val_string})"""
| 269 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : Optional[Any] = {
"configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] = [
"MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegatronBertForCausalLM",
"MegatronBertForMaskedLM",
"MegatronBertForMultipleChoice",
"MegatronBertForNextSentencePrediction",
"MegatronBertForPreTraining",
"MegatronBertForQuestionAnswering",
"MegatronBertForSequenceClassification",
"MegatronBertForTokenClassification",
"MegatronBertModel",
"MegatronBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
_lowercase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def lowercase_ ( lowerCAmelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = FileLock(str(tmpdir / """foo.lock""" ) )
__UpperCAmelCase : List[str] = FileLock(str(tmpdir / """foo.lock""" ) )
__UpperCAmelCase : Any = 0.01
with locka.acquire():
with pytest.raises(lowerCAmelCase__ ):
__UpperCAmelCase : List[Any] = time.time()
locka.acquire(lowerCAmelCase__ )
assert time.time() - _start > timeout
def lowercase_ ( lowerCAmelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase : str = """a""" * 1000 + """.lock"""
__UpperCAmelCase : List[str] = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(lowerCAmelCase__ )
assert len(os.path.basename(locka._lock_file ) ) <= 255
__UpperCAmelCase : Union[str, Any] = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(lowerCAmelCase__ ):
locka.acquire(0 )
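# Minimal sketch of the behavior the two tests above pin down (hypothetical
# helper; `FileLock` and `Timeout` are the imports at the top of this file):
def _filelock_demo(tmpdir):
    lock_a = FileLock(str(tmpdir / "demo.lock"))
    lock_b = FileLock(str(tmpdir / "demo.lock"))
    with lock_a.acquire():
        try:
            lock_b.acquire(timeout=0.01)  # lock is held elsewhere
        except Timeout:
            pass  # expected: a second acquire on the same path times out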
| 254 | 0 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Any = logging.get_logger(__name__)
# TODO Update this
_lowerCamelCase : Tuple = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = "esm"
def __init__( self : Union[str, Any], _UpperCAmelCase : str=None, _UpperCAmelCase : Dict=None, _UpperCAmelCase : List[Any]=None, _UpperCAmelCase : Optional[Any]=7_6_8, _UpperCAmelCase : Optional[Any]=1_2, _UpperCAmelCase : List[Any]=1_2, _UpperCAmelCase : str=3_0_7_2, _UpperCAmelCase : Optional[int]=0.1, _UpperCAmelCase : Optional[Any]=0.1, _UpperCAmelCase : Any=1_0_2_6, _UpperCAmelCase : Optional[int]=0.02, _UpperCAmelCase : Union[str, Any]=1E-12, _UpperCAmelCase : Optional[int]="absolute", _UpperCAmelCase : List[Any]=True, _UpperCAmelCase : Tuple=None, _UpperCAmelCase : str=False, _UpperCAmelCase : int=False, _UpperCAmelCase : str=None, _UpperCAmelCase : Tuple=None, **_UpperCAmelCase : str, ) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase, mask_token_id=_UpperCAmelCase, **_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = vocab_size
SCREAMING_SNAKE_CASE__ : Any = hidden_size
SCREAMING_SNAKE_CASE__ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : int = num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Optional[Any] = position_embedding_type
SCREAMING_SNAKE_CASE__ : Optional[int] = use_cache
SCREAMING_SNAKE_CASE__ : Tuple = emb_layer_norm_before
SCREAMING_SNAKE_CASE__ : List[Any] = token_dropout
SCREAMING_SNAKE_CASE__ : Optional[Any] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
SCREAMING_SNAKE_CASE__ : List[str] = EsmFoldConfig()
elif isinstance(_UpperCAmelCase, _UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ : List[str] = EsmFoldConfig(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_default_vocab_list()
else:
SCREAMING_SNAKE_CASE__ : List[str] = vocab_list
else:
SCREAMING_SNAKE_CASE__ : Tuple = None
SCREAMING_SNAKE_CASE__ : Any = None
if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", _UpperCAmelCase ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def A_ ( self : Any ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = super().to_dict()
if isinstance(self.esmfold_config, _UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.esmfold_config.to_dict()
return output
@dataclass
class lowerCamelCase :
"""simple docstring"""
UpperCAmelCase_ = None
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = 0
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = 128
UpperCAmelCase_ = None
def A_ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
if self.trunk is None:
SCREAMING_SNAKE_CASE__ : Dict = TrunkConfig()
elif isinstance(self.trunk, _UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ : str = TrunkConfig(**self.trunk )
def A_ ( self : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = asdict(self )
SCREAMING_SNAKE_CASE__ : int = self.trunk.to_dict()
return output
@dataclass
class lowerCamelCase :
"""simple docstring"""
UpperCAmelCase_ = 48
UpperCAmelCase_ = 1024
UpperCAmelCase_ = 128
UpperCAmelCase_ = 32
UpperCAmelCase_ = 32
UpperCAmelCase_ = 32
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
UpperCAmelCase_ = False
UpperCAmelCase_ = 4
UpperCAmelCase_ = 128
UpperCAmelCase_ = None
def A_ ( self : Dict ) -> Dict:
"""simple docstring"""
if self.structure_module is None:
SCREAMING_SNAKE_CASE__ : Dict = StructureModuleConfig()
elif isinstance(self.structure_module, _UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ : List[str] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                F''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
SCREAMING_SNAKE_CASE__ : List[Any] = self.sequence_state_dim // self.sequence_head_width
SCREAMING_SNAKE_CASE__ : str = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def A_ ( self : int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = asdict(self )
SCREAMING_SNAKE_CASE__ : List[Any] = self.structure_module.to_dict()
return output
@dataclass
class lowerCamelCase :
"""simple docstring"""
UpperCAmelCase_ = 384
UpperCAmelCase_ = 128
UpperCAmelCase_ = 16
UpperCAmelCase_ = 128
UpperCAmelCase_ = 12
UpperCAmelCase_ = 4
UpperCAmelCase_ = 8
UpperCAmelCase_ = 0.1
UpperCAmelCase_ = 8
UpperCAmelCase_ = 1
UpperCAmelCase_ = 2
UpperCAmelCase_ = 7
UpperCAmelCase_ = 10
UpperCAmelCase_ = 1E-8
UpperCAmelCase_ = 1E5
def A_ ( self : List[Any] ) -> str:
"""simple docstring"""
return asdict(self )
def _a ( ) -> List[str]:
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
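# Illustrative sketch (assumption: with placeholder names restored, the classes
# above are the usual EsmConfig / EsmFoldConfig / TrunkConfig). A folding model
# built without an explicit esmfold_config falls back to a default EsmFoldConfig
# and to the 33-token vocabulary returned by get_default_vocab_list():
#   config = EsmConfig(is_folding_model=True)
#   assert config.vocab_list == get_default_vocab_list()
#   assert isinstance(config.to_dict()["esmfold_config"], dict)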
| 351 |
import qiskit
def _a ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> qiskit.result.counts.Counts:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = qiskit.Aer.get_backend("aer_simulator" )
# Create a Quantum Circuit acting on the q register
SCREAMING_SNAKE_CASE__ : List[Any] = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
SCREAMING_SNAKE_CASE__ : int = qiskit.execute(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , shots=10_00 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_lowerCamelCase : List[str] = single_qubit_measure(2, 2)
print(f"Total count for various states are: {counts}")
| 191 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a_ = logging.get_logger(__name__)
# TODO: upload to AWS
a_ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class lowercase__ ( _UpperCAmelCase ):
a_ ="""retribert"""
def __init__( self , __UpperCAmelCase=30522 , __UpperCAmelCase=768 , __UpperCAmelCase=8 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , __UpperCAmelCase=True , __UpperCAmelCase=128 , __UpperCAmelCase=0 , **__UpperCAmelCase , )-> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = share_encoders
lowerCAmelCase__ = projection_dim
| 340 |
import collections
import importlib.util
import os
import re
from pathlib import Path
a_ = '''src/transformers'''
# Matches is_xxx_available()
a_ = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
a_ = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
a_ = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
a_ = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
a_ = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
a_ = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
a_ = re.compile('''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
a_ = re.compile('''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
a_ = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
a_ = re.compile(r'''^\s*try:''')
# Catches a line with else:
a_ = re.compile(r'''^\s*else:''')
def _a ( UpperCamelCase_ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
if _re_test_backend.search(UpperCamelCase_ ) is None:
return None
lowerCAmelCase__ = [b[0] for b in _re_backend.findall(UpperCamelCase_ )]
backends.sort()
return "_and_".join(UpperCamelCase_ )
def _a ( UpperCamelCase_ : Optional[int] ) -> Tuple:
"""simple docstring"""
with open(UpperCamelCase_ , "r" , encoding="utf-8" , newline="\n" ) as f:
lowerCAmelCase__ = f.readlines()
lowerCAmelCase__ = 0
while line_index < len(UpperCamelCase_ ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(UpperCamelCase_ ):
return None
# First grab the objects without a specific backend in _import_structure
lowerCAmelCase__ = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
lowerCAmelCase__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(UpperCamelCase_ ):
lowerCAmelCase__ = _re_one_line_import_struct.search(UpperCamelCase_ ).groups()[0]
            lowerCAmelCase__ = re.findall(r"\[([^\]]+)\]" , UpperCamelCase_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
lowerCAmelCase__ = _re_import_struct_key_value.search(UpperCamelCase_ )
if single_line_import_search is not None:
lowerCAmelCase__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(UpperCamelCase_ ) > 0]
objects.extend(UpperCamelCase_ )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
lowerCAmelCase__ = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowerCAmelCase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
lowerCAmelCase__ = lines[line_index]
if _re_import_struct_add_one.search(UpperCamelCase_ ) is not None:
objects.append(_re_import_struct_add_one.search(UpperCamelCase_ ).groups()[0] )
elif _re_import_struct_add_many.search(UpperCamelCase_ ) is not None:
lowerCAmelCase__ = _re_import_struct_add_many.search(UpperCamelCase_ ).groups()[0].split(", " )
lowerCAmelCase__ = [obj[1:-1] for obj in imports if len(UpperCamelCase_ ) > 0]
objects.extend(UpperCamelCase_ )
elif _re_between_brackets.search(UpperCamelCase_ ) is not None:
lowerCAmelCase__ = _re_between_brackets.search(UpperCamelCase_ ).groups()[0].split(", " )
lowerCAmelCase__ = [obj[1:-1] for obj in imports if len(UpperCamelCase_ ) > 0]
objects.extend(UpperCamelCase_ )
elif _re_quote_object.search(UpperCamelCase_ ) is not None:
objects.append(_re_quote_object.search(UpperCamelCase_ ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
lowerCAmelCase__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowerCAmelCase__ = []
while (
line_index < len(UpperCamelCase_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
lowerCAmelCase__ = lines[line_index]
lowerCAmelCase__ = _re_import.search(UpperCamelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowerCAmelCase__ = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(UpperCamelCase_ ):
# If the line is an if is_backend_available, we grab all objects associated.
lowerCAmelCase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
lowerCAmelCase__ = lines[line_index]
lowerCAmelCase__ = _re_import.search(UpperCamelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowerCAmelCase__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def _a ( UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any] ) -> str:
"""simple docstring"""
def find_duplicates(UpperCamelCase_ : str ):
return [k for k, v in collections.Counter(UpperCamelCase_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowerCAmelCase__ = []
for key in import_dict_objects.keys():
lowerCAmelCase__ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"Duplicate _import_structure definitions for: {duplicate_imports}" )
lowerCAmelCase__ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowerCAmelCase__ = "base imports" if key == "none" else F"{key} backend"
errors.append(F"Differences for {name}:" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F" {a} in TYPE_HINT but not in _import_structure." )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F" {a} in _import_structure but not in TYPE_HINT." )
return errors
def _a ( ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ = []
for root, _, files in os.walk(UpperCamelCase_ ):
if "__init__.py" in files:
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , "__init__.py" )
lowerCAmelCase__ = parse_init(UpperCamelCase_ )
if objects is not None:
lowerCAmelCase__ = analyze_results(*UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
lowerCAmelCase__ = F"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
failures.append("\n".join(UpperCamelCase_ ) )
if len(UpperCamelCase_ ) > 0:
raise ValueError("\n\n".join(UpperCamelCase_ ) )
def _a ( ) -> str:
"""simple docstring"""
lowerCAmelCase__ = []
for path, directories, files in os.walk(UpperCamelCase_ ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(UpperCamelCase_ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(UpperCamelCase_ ) / folder).glob("*.py" ) ) ) == 0:
continue
lowerCAmelCase__ = str((Path(UpperCamelCase_ ) / folder).relative_to(UpperCamelCase_ ) )
lowerCAmelCase__ = short_path.replace(os.path.sep , "." )
submodules.append(UpperCamelCase_ )
for fname in files:
if fname == "__init__.py":
continue
lowerCAmelCase__ = str((Path(UpperCamelCase_ ) / fname).relative_to(UpperCamelCase_ ) )
lowerCAmelCase__ = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
if len(submodule.split("." ) ) == 1:
submodules.append(UpperCamelCase_ )
return submodules
a_ = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def _a ( ) -> int:
"""simple docstring"""
lowerCAmelCase__ = importlib.util.spec_from_file_location(
"transformers" , os.path.join(UpperCamelCase_ , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
lowerCAmelCase__ = spec.loader.load_module()
lowerCAmelCase__ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(UpperCamelCase_ ) > 0:
lowerCAmelCase__ = "\n".join(F"- {module}" for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registered in the main init of Transformers:\n"
F"{list_of_modules}\n"
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 340 | 1 |
def lowerCamelCase__ ( UpperCamelCase__ : Any ) -> Dict:
'''simple docstring'''
if collection == []:
return []
# get some information about the collection
_snake_case = len(UpperCamelCase__ )
_snake_case = max(UpperCamelCase__ )
_snake_case = min(UpperCamelCase__ )
# create the counting array
_snake_case = coll_max + 1 - coll_min
_snake_case = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
for i in range(1 , UpperCamelCase__ ):
_snake_case = counting_arr[i] + counting_arr[i - 1]
# create the output collection
_snake_case = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to begin, updating counting_arr
for i in reversed(range(0 , UpperCamelCase__ ) ):
_snake_case = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> List[str]:
'''simple docstring'''
return "".join([chr(UpperCamelCase__ ) for i in counting_sort([ord(UpperCamelCase__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
UpperCAmelCase_ = input("""Enter numbers separated by a comma:\n""").strip()
UpperCAmelCase_ = [int(item) for item in user_input.split(""",""")]
print(counting_sort(unsorted))
| 295 |
def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] ) -> Tuple:
'''simple docstring'''
_snake_case = [0 for i in range(r + 1 )]
# nc0 = 1
_snake_case = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
_snake_case = min(UpperCamelCase__ , UpperCamelCase__ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
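# Worked check for the call above (assuming the def is the `binomial_coefficient`
# being invoked): the rolling Pascal update keeps only the first r + 1 entries,
# computing C(n, r) in O(n * r) time and O(r) space; C(10, 5) = 252.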
| 295 | 1 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
__A = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
__A = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
__A = dict(zip(vocab, range(len(vocab))))
__A = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
__A = Path(tmpdirname)
__A = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
__A = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
__A = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, "w") as fp:
fp.write("\n".join(merges))
__A = FSMTTokenizer(
langs=["en", "ru"],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
__A = FSMTConfig(
langs=["ru", "en"],
src_vocab_size=1_0_0_0,
tgt_vocab_size=1_0_0_0,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
__A = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
__A = tokenizer(["Making tiny model"], return_tensors="pt")
__A = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 177 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def __UpperCamelCase ( _A , _A ):
lowerCAmelCase_ = args.log_outputs
lowerCAmelCase_ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
lowerCAmelCase_ = load_metric('''wer''' )
lowerCAmelCase_ = load_metric('''cer''' )
# compute metrics
lowerCAmelCase_ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
lowerCAmelCase_ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
lowerCAmelCase_ = f"WER: {wer_result}\nCER: {cer_result}"
print(_A )
with open(f"{dataset_id}_eval_results.txt" , '''w''' ) as f:
f.write(_A )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
lowerCAmelCase_ = f"log_{dataset_id}_predictions.txt"
lowerCAmelCase_ = f"log_{dataset_id}_targets.txt"
with open(_A , '''w''' ) as p, open(_A , '''w''' ) as t:
# mapping function to write output
def write_to_file(_A , _A ):
p.write(f"{i}" + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(f"{i}" + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(_A , with_indices=_A )
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
lowerCAmelCase_ = re.sub(_A , '''''' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
lowerCAmelCase_ = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
lowerCAmelCase_ = ''' '''.join(text.split(_A ) )
return text
def __UpperCamelCase ( _A ):
# load dataset
lowerCAmelCase_ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=_A )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained(args.model_id )
lowerCAmelCase_ = feature_extractor.sampling_rate
# resample audio
lowerCAmelCase_ = dataset.cast_column('''audio''' , Audio(sampling_rate=_A ) )
# load eval pipeline
if args.device is None:
lowerCAmelCase_ = 0 if torch.cuda.is_available() else -1
lowerCAmelCase_ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(_A ):
lowerCAmelCase_ = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
lowerCAmelCase_ = prediction['''text''']
lowerCAmelCase_ = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
lowerCAmelCase_ = dataset.map(_A , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(_A , _A )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
_A = parser.parse_args()
main(args)
| 278 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class __lowercase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
a = 'mgp-str'
def __init__(self ,_lowerCamelCase=[32, 128] ,_lowerCamelCase=4 ,_lowerCamelCase=3 ,_lowerCamelCase=27 ,_lowerCamelCase=38 ,_lowerCamelCase=50257 ,_lowerCamelCase=30522 ,_lowerCamelCase=768 ,_lowerCamelCase=12 ,_lowerCamelCase=12 ,_lowerCamelCase=4.0 ,_lowerCamelCase=True ,_lowerCamelCase=False ,_lowerCamelCase=1E-5 ,_lowerCamelCase=0.0 ,_lowerCamelCase=0.0 ,_lowerCamelCase=0.0 ,_lowerCamelCase=False ,_lowerCamelCase=0.0_2 ,**_lowerCamelCase ,) -> int:
'''simple docstring'''
super().__init__(**a_ )
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = max_token_length
__lowercase = num_character_labels
__lowercase = num_bpe_labels
__lowercase = num_wordpiece_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = mlp_ratio
__lowercase = distilled
__lowercase = layer_norm_eps
__lowercase = drop_rate
__lowercase = qkv_bias
__lowercase = attn_drop_rate
__lowercase = drop_path_rate
__lowercase = output_aa_attentions
__lowercase = initializer_range
| 359 |
'''simple docstring'''
import heapq
def _lowerCAmelCase ( lowerCamelCase_ : dict ):
__lowercase = []
    # for each node and its adjacency list, add them and the rank of the node to the queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowerCamelCase_ , [-1 * len(lowerCamelCase_ ), (key, value)] )
# chosen_vertices = set of chosen vertices
__lowercase = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
__lowercase = heapq.heappop(lowerCamelCase_ )[1][0]
chosen_vertices.add(lowerCamelCase_ )
# Remove all arcs adjacent to argmax
for elem in queue:
        # if v has no adjacent nodes, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
__lowercase = elem[1][1].index(lowerCamelCase_ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(lowerCamelCase_ )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
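# Worked trace for the demo graph above: vertex 2 is popped first (rank -3,
# ties broken by vertex key), then 0, 1 and 4 as the remaining adjacency lists
# shrink, so the script prints {0, 1, 2, 4}.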
| 217 | 0 |
"""simple docstring"""
lowercase__ : dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.609344,
"knot": 1.852,
}
lowercase__ : dict[str, float] = {
"km/h": 1.0,
"m/s": 0.277777778,
"mph": 0.621371192,
"knot": 0.539956803,
}
def __lowercase ( _a , _a , _a ):
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
snake_case_ : Tuple = (
f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
f"Valid values are: {', '.join(_a )}"
)
raise ValueError(_a )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
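# Worked examples for the converter above (assumed public name `convert_speed`):
# the value is first normalized through the km/h column, then scaled to the
# target unit and rounded to 3 decimals.
#   convert_speed(100, "km/h", "m/s") -> 27.778
#   convert_speed(100, "m/s", "km/h") -> 360.0
#   convert_speed(60, "mph", "knot")  -> 52.139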
| 264 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ : List[Any] = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Any = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : int = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Dict = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Tuple = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowercase__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 264 | 1 |
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def lowerCamelCase ( *SCREAMING_SNAKE_CASE ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE , '''r''' ) as fh:
fcntl.flock(SCREAMING_SNAKE_CASE , fcntl.LOCK_EX )
try:
print(*SCREAMING_SNAKE_CASE )
finally:
fcntl.flock(SCREAMING_SNAKE_CASE , fcntl.LOCK_UN )
__lowercase = int(os.environ['''LOCAL_RANK'''])
torch.cuda.set_device(local_rank)
__lowercase = torch.device('''cuda''', local_rank)
__lowercase = socket.gethostname()
__lowercase = F'[{hostname}-{local_rank}]'
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
__lowercase = dist.get_rank()
__lowercase = dist.get_world_size()
printflock(F'{gpu} is OK (global rank: {rank}/{world_size})')
dist.barrier()
if rank == 0:
printflock(F'pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}')
except Exception:
printflock(F'{gpu} is broken')
raise
| 105 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__lowercase = logging.get_logger(__name__)
__lowercase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowercase = {
'''vocab_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
},
'''merges_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
},
}
__lowercase = {'''allegro/herbert-base-cased''': 514}
__lowercase = {}
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Tuple = VOCAB_FILES_NAMES
a__ : Dict = PRETRAINED_VOCAB_FILES_MAP
a__ : Tuple = PRETRAINED_INIT_CONFIGURATION
a__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : Union[str, Any] = HerbertTokenizer
def __init__( self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase="<s>" , __lowercase="<unk>" , __lowercase="<pad>" , __lowercase="<mask>" , __lowercase="</s>" , **__lowercase , ) -> Optional[Any]:
super().__init__(
__lowercase , __lowercase , tokenizer_file=__lowercase , cls_token=__lowercase , unk_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , sep_token=__lowercase , **__lowercase , )
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> List[int]:
__UpperCamelCase :List[str] = [self.cls_token_id]
__UpperCamelCase :Tuple = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase)
if token_ids_a is None:
return [1] + ([0] * len(__lowercase)) + [1]
return [1] + ([0] * len(__lowercase)) + [1] + ([0] * len(__lowercase)) + [1]
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> List[int]:
__UpperCamelCase :Optional[Any] = [self.sep_token_id]
__UpperCamelCase :int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Tuple[str]:
__UpperCamelCase :Optional[int] = self._tokenizer.model.save(__lowercase , name=__lowercase)
return tuple(__lowercase)
| 105 | 1 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list ) -> bool:
if not isinstance(_lowerCamelCase ,_lowerCamelCase ):
raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
if len(_lowerCamelCase ) == 0:
raise ValueError("""Input list must be a non empty list""" )
if len(_lowerCamelCase ) == 1:
return True
_lowerCAmelCase : Dict = series[1] - series[0]
for index in range(len(_lowerCamelCase ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list ) -> float:
if not isinstance(_lowerCamelCase ,_lowerCamelCase ):
raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
if len(_lowerCamelCase ) == 0:
raise ValueError("""Input list must be a non empty list""" )
_lowerCAmelCase : Union[str, Any] = 0
for val in series:
answer += val
return answer / len(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
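# Worked examples for the two helpers above (hypothetical public names, since
# the defs are anonymized): every consecutive difference must equal the first
# one, and the mean is simply sum / len.
#   is_arithmetic_series([2, 4, 6]) -> True   (common difference 2)
#   is_arithmetic_series([2, 4, 7]) -> False  (7 - 4 != 2)
#   arithmetic_mean([2, 4, 6])      -> 4.0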
| 44 |
import unittest
from knapsack import knapsack as k
class __a ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__: List[Any] = 0
lowercase__: List[Any] = [0]
lowercase__: str = [0]
lowercase__: Tuple = len(lowerCAmelCase__ )
self.assertEqual(k.knapsack(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , 0 )
lowercase__: Optional[Any] = [60]
lowercase__: Dict = [10]
lowercase__: str = len(lowerCAmelCase__ )
self.assertEqual(k.knapsack(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , 0 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Union[str, Any] = 3
lowercase__: List[str] = [1, 2, 3]
lowercase__: Union[str, Any] = [3, 2, 1]
lowercase__: Union[str, Any] = len(lowerCAmelCase__ )
self.assertEqual(k.knapsack(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , 5 )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__: Optional[Any] = 50
lowercase__: str = [60, 100, 120]
lowercase__: Any = [10, 20, 30]
lowercase__: List[Any] = len(lowerCAmelCase__ )
self.assertEqual(k.knapsack(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , 220 )
if __name__ == "__main__":
unittest.main()
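# A minimal 0/1 knapsack the tests above could target (assumed signature
# knapsack(capacity, weights, values, counter); a sketch, not the imported
# `knapsack` module itself):
def knapsack(capacity, weights, values, counter):
    # Base case: no items left or no remaining capacity.
    if counter == 0 or capacity == 0:
        return 0
    # The current item does not fit: skip it.
    if weights[counter - 1] > capacity:
        return knapsack(capacity, weights, values, counter - 1)
    # Otherwise take the better of including or excluding the item.
    return max(
        values[counter - 1]
        + knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
        knapsack(capacity, weights, values, counter - 1),
    )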
| 196 | 0 |
'''simple docstring'''
import heapq
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(__A ,[-1 * len(__A ), (key, value)] )
# chosen_vertices = set of chosen vertices
__UpperCamelCase = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
__UpperCamelCase = heapq.heappop(__A )[1][0]
chosen_vertices.add(__A )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
__UpperCamelCase = elem[1][1].index(__A )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(__A )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ : Optional[Any] = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 243 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
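

# Minimal usage sketch (illustrative only; assumes a local "photo.jpg" exists
# and that the BLIP checkpoint can be downloaded):
#
#     from PIL import Image
#
#     captioner = ImageCaptioningTool()
#     print(captioner(Image.open("photo.jpg")))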
| 243 | 1 |
def bfs(graph, source, sink, parent):
    # Return True if the sink is reachable from the source in the residual graph.
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[sink]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS and stores the augmenting path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        # Update the residual capacities of the edges along the path
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
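
# Because the augmenting path is found with breadth-first search, this is the
# Edmonds-Karp variant of Ford-Fulkerson (O(V * E^2)). A second, hand-made
# example (not from the original file): two disjoint paths of capacity 2 each.
tiny_graph = [
    [0, 3, 2, 0],
    [0, 0, 0, 2],
    [0, 0, 0, 3],
    [0, 0, 0, 0],
]
print(ford_fulkerson(tiny_graph, 0, 3))  # expected: 4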
| 259 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size])

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 5_0000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
"""simple docstring"""
    tolerance = 1e-4
    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)
    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )
        expected_query = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
        expected_key = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key, atol=self.tolerance)
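

# Rough numpy sketch of rotary position embeddings (illustrative only; the exact
# pairing/interleaving used by TFRoFormerSelfAttention may differ): each pair of
# feature dimensions is rotated by a position-dependent angle theta.
#
#     import numpy as np
#
#     def rotate_pairs(x, theta):  # x: (..., 2k), theta: broadcastable angles
#         x1, x2 = x[..., 0::2], x[..., 1::2]
#         out = np.empty_like(x)
#         out[..., 0::2] = x1 * np.cos(theta) - x2 * np.sin(theta)
#         out[..., 1::2] = x1 * np.sin(theta) + x2 * np.cos(theta)
#         return out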
| 259 | 1 |
'''simple docstring'''
class RadixNode:
"""simple docstring"""
def __init__( self : int , snake_case_ : str = "" , snake_case_ : bool = False ):
# Mapping from the first character of the prefix of the node
snake_case__ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
snake_case__ : List[Any] = is_leaf
snake_case__ : Tuple = prefix
    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node prefix and a word, plus the leftovers."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)
    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True
def pytests() -> None:
    assert test_trie()
def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()
if __name__ == "__main__":
main()
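

if __name__ == "__main__":
    # Extra illustration (not in the original file): `match` splits a node prefix
    # and a query word into (common, leftover_prefix, leftover_word), which is
    # exactly what drives the three insert cases above.
    print(RadixNode(prefix="band").match("banana"))  # ('ban', 'd', 'ana')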
| 357 |
'''simple docstring'''
def multiplicative_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps
def additive_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
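

if __name__ == "__main__":
    # Worked examples (not part of the original file): 39 -> 27 -> 14 -> 4 gives
    # three multiplicative steps; 199 -> 19 -> 10 -> 1 gives three additive steps.
    assert multiplicative_persistence(39) == 3
    assert additive_persistence(199) == 3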
| 43 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24_000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ) -> None:
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
    # chunk_length is the chunk size in samples; chunk_stride is the hop size
    # derived from the overlap ratio.
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
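

# Minimal usage sketch (illustrative only; values are made up):
#
#     import numpy as np
#
#     extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
#     audio = np.random.randn(24_000).astype(np.float32)  # one second of mono audio
#     features = extractor(audio, sampling_rate=24_000, return_tensors="np")
#     print(features["input_values"].shape)  # (batch, channels, samples)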
| 85 | """simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate entropy of a pre-softmax logit Tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. ",
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
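

# Sketch of the early-exit rule (illustrative; mirrors DeeBertEncoder.forward):
# an off-ramp classifier fires when the entropy of its logits drops below the
# per-layer threshold, so "easy" inputs skip the remaining layers.
#
#     logits = torch.tensor([[4.0, -1.0, -1.0]])  # confident prediction -> low entropy
#     if entropy(logits).item() < 0.5:            # hypothetical threshold
#         ...                                     # raise HighwayException here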
| 44 | 0 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)
    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)
    @classmethod
    def from_pretrained(cls, model_id: Union[str, Path], force_download: bool = True,
                        use_auth_token: Optional[str] = None, cache_dir: Optional[str] = None, **model_kwargs):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(model_id=model_id, revision=revision, cache_dir=cache_dir,
                                    force_download=force_download, use_auth_token=use_auth_token, **model_kwargs)
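

# Minimal usage sketch (illustrative only; assumes a local "model.onnx" file
# and an int64 "input_ids" input):
#
#     sess = OnnxRuntimeModel.load_model("model.onnx")
#     model = OnnxRuntimeModel(model=sess)
#     outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))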
| 20 | import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power (P) from apparent power (S) and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power (Q) from apparent power (S) and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
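

if __name__ == "__main__":
    # Quick numeric illustration (not in the original file): with S = 100 VA and
    # pf = 0.8, P = 80 W and Q = 60 var, satisfying S^2 = P^2 + Q^2.
    assert real_power(100, 0.8) == 80.0
    assert round(reactive_power(100, 0.8), 10) == 60.0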
| 20 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints where one sequence is a complete subset of another.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
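

if __name__ == "__main__":
    # Quick illustration outside the test harness (not in the original file;
    # requires torch to be installed): the constraint completes as soon as any
    # branch of the trie of allowed sequences is consumed.
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in (1, 2, 4):
        stepped, completed, reset = dc.update(token)
    print(dc.completed, dc.current_seq)  # True [1, 2, 4]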
| 109 |
"""simple docstring"""
values = {
0: "0",
1: "1",
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
1_0: "a",
1_1: "b",
1_2: "c",
1_3: "d",
1_4: "e",
1_5: "f",
}
def decimal_to_hexadecimal(decimal: float) -> str:
    """Take an integer-valued decimal number and return its hexadecimal representation as a str."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
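

if __name__ == "__main__":
    # Quick spot checks (not in the original file), compared against builtin hex():
    assert decimal_to_hexadecimal(26) == "0x1a" == hex(26)
    assert decimal_to_hexadecimal(-256) == "-0x100" == hex(-256)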
| 109 | 1 |
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1_024, max_target_length=1_024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
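
# Example invocation (illustrative; the script/module name and paths are
# placeholders):
#
#     python save_len_file.py facebook/bart-large-cnn /path/to/data_dir
#
# The pickled length files can then back a length-sorted sampler for dynamic
# batching during seq2seq fine-tuning.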
| 155 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
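
# Behavior note (illustrative): importing this package is cheap because
# `_LazyModule` defers the heavy torch/tf/flax imports declared above until an
# attribute is first accessed, e.g.
#
#     from transformers.models.clip import CLIPModel  # triggers the real import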
| 155 | 1 |
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """
    Return True if placing digit ``n`` at (row, column) violates no row,
    column, or 3x3 subgrid constraint.
    """
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the (row, column) of the first empty cell, or None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    """
    Fill the empty cells one digit at a time, backtracking whenever a
    digit leads to a contradiction.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
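

if __name__ == "__main__":
    # Extra sanity check (not in the original file): a solved grid must contain
    # the digits 1..9 exactly once in every row. Solve on a copy because
    # sudoku() mutates its argument in place.
    solved = sudoku([row[:] for row in initial_grid])
    if solved is not None:
        assert all(sorted(row) == list(range(1, 10)) for row in solved)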
| 318 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
super().__init__(*A , **A )
| 318 | 1 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key (k ):
    """simple docstring"""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name , hf_name )
    return k
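# Example of the key translation above (a sketch; the left-hand key mirrors the TF
# checkpoint layout that PATTERNS assumes):
assert rename_state_dict_key("""encoder/memory_attention/output_proj/kernel""" ) == (
    """encoder.encoder_attn.out_proj.weight"""
)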
def convert_pegasus (tf_weights : dict , cfg_updates : dict ):
    """simple docstring"""
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(F"could not find new key {new_k} in state dict. (converted from {k})" )
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, F"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["""shared.weight"""][cfg.pad_token_id] = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
    mapping["""encoder.embed_tokens.weight"""] = mapping["""shared.weight"""]
    mapping["""decoder.embed_tokens.weight"""] = mapping["""shared.weight"""]
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
    mapping.update(**empty_biases )
    missing, extra = torch_model.model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
    ]
    assert unexpected_missing == [], F"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], F"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy (path : str = "./ckpt/aeslc/model.ckpt-32000" ):
    """simple docstring"""
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ["""Adafactor""", """global_step"""]
    for name, shape in tqdm(init_vars , desc="""converting tf checkpoint to dict""" ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch (ckpt_path : str , save_dir : str ):
    """simple docstring"""
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[F"summarization_{dataset}"]["""max_position_embeddings"""]
    tok = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[F"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["""task_specific_params"""] = task_specific_params
    torch_model = convert_pegasus(tf_weights , cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    sd.pop("""model.decoder.embed_positions.weight""" )
    sd.pop("""model.encoder.embed_positions.weight""" )
    torch.save(sd , Path(save_dir ) / """pytorch_model.bin""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
    parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join('''pegasus''', dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
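# Example invocation (illustrative local paths; the script filename is assumed):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus-aeslc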
| 132 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    '''simple docstring'''
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list )
    handles: list = field(default_factory=list )
    def _forward_hook( self, m: nn.Module, inputs: Tensor, outputs: Tensor ):
        '''simple docstring'''
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m, nn.Conv2d ) or isinstance(m, nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
    def __call__( self, x: Tensor ):
        '''simple docstring'''
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(x )
        [x.remove() for x in self.handles]
        return self
    @property
    def parametrized( self ):
        '''simple docstring'''
        # keep only submodules that carry learnable parameters
        return list(filter(lambda x: len(list(x.state_dict().keys() ) ) > 0, self.traced ) )
@dataclass
class ModuleTransfer:
    '''simple docstring'''
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list )
    dest_skip: List = field(default_factory=list )
    def __call__( self, x: Tensor ):
        '''simple docstring'''
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m: type(m ) not in self.src_skip, src_traced ) )
        dest_traced = list(filter(lambda m: type(m ) not in self.dest_skip, dest_traced ) )
        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced )} operations while"
                f" destination module has {len(dest_traced )}." )
        for dest_m, src_m in zip(dest_traced, src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}" )
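# A minimal sketch of the transfer flow above (assumes only torch): copy the weights
# of one tiny module tree into a structurally identical one and check they match.
def _demo_module_transfer() -> None:
    src = nn.Sequential(nn.Conv2d(3, 8, 3 ), nn.BatchNorm2d(8 ) )
    dest = nn.Sequential(nn.Conv2d(3, 8, 3 ), nn.BatchNorm2d(8 ) )
    ModuleTransfer(src=src, dest=dest )(torch.randn(1, 3, 16, 16 ) )
    assert torch.allclose(src[0].weight, dest[0].weight )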
def convert_weight_and_push (name : str , config : ResNetConfig , save_directory : Path , push_to_hub : bool = True ):
    """simple docstring"""
    print(F"Converting {name}..." )
    with torch.no_grad():
        from_model = timm.create_model(name , pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model )
        x = torch.randn((1, 3, 2_24, 2_24) )
        module_transfer(x )
    assert torch.allclose(from_model(x ) , our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name = F"resnet{'-'.join(name.split('resnet' ) )}"
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add model""" , use_temp_dir=True , )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add image processor""" , use_temp_dir=True , )
        print(F"Pushed {checkpoint_name}" )
def convert_weights_and_push (save_directory : Path , model_name : str = None , push_to_hub : bool = True ):
    """simple docstring"""
    filename = """imagenet-1k-id2label.json"""
    num_labels = 10_00
    expected_shape = (1, num_labels)
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_config = {
        """resnet18""": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="""basic""" ),
        """resnet26""": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="""bottleneck""" ),
        """resnet34""": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="""basic""" ),
        """resnet50""": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="""bottleneck""" ),
        """resnet101""": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="""bottleneck""" ),
        """resnet152""": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="""bottleneck""" ),
    }
    if model_name:
        convert_weight_and_push(model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name , config , save_directory , push_to_hub )
    return config, expected_shape
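# Example invocation (hypothetical paths): convert a single checkpoint locally:
#   python convert_resnet_to_pytorch.py --model_name resnet50 --pytorch_dump_folder_path ./out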
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 132 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape ( tensor_list ) -> bool:
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:] )
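# e.g. (a quick sketch): matching shapes pass, mismatched shapes do not
assert check_same_shape([torch.ones(2, 3 ), torch.zeros(2, 3 )] )
assert not check_same_shape([torch.ones(2, 3 ), torch.zeros(3, 2 )] )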
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    test_cpu_offload = True
    @property
    def dummy_image( self ):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0 ) ).to(torch_device )
        return image
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            act_fn='gelu', attention_head_dim=8, norm_num_groups=None, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=(
                'KDownBlock2D',
                'KCrossAttnDownBlock2D',
                'KCrossAttnDownBlock2D',
                'KCrossAttnDownBlock2D',
            ), in_channels=8, mid_block_type=None, only_cross_attention=False, out_channels=5, resnet_time_scale_shift='scale_shift', time_embedding_type='fourier', timestep_post_act='gelu', up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D'), )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[
                'DownEncoderBlock2D',
                'DownEncoderBlock2D',
                'DownEncoderBlock2D',
                'DownEncoderBlock2D',
            ], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        scheduler = EulerDiscreteScheduler(prediction_type='sample' )
        text_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='quick_gelu', projection_dim=512, )
        text_encoder = CLIPTextModel(text_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': model.eval(),
            'vae': vae.eval(),
            'scheduler': scheduler,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
    def get_dummy_inputs( self, device, seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': self.dummy_image.cpu(),
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_latent_upscaler( self ):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 256, 256, 3) )
        expected_slice = np.array(
            [0.47_22_24_12, 0.41_92_16_33, 0.44_71_74_34, 0.46_87_41_92, 0.42_58_82_58, 0.46_15_07_26, 0.4_67_75_34, 0.45_58_38_32, 0.48_57_90_55] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff, 1e-3 )
    def test_attention_slicing_forward_pass( self ):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
    def test_cpu_offload_forward_pass( self ):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
    def test_dict_tuple_outputs_equivalent( self ):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
    def test_pt_np_pil_outputs_equivalent( self ):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
    def test_save_load_local( self ):
        super().test_save_load_local(expected_max_difference=3e-3 )
    def test_save_load_optional_components( self ):
        super().test_save_load_optional_components(expected_max_difference=3e-3 )
    def test_karras_schedulers_shape( self ):
        skip_schedulers = [
            'DDIMScheduler',
            'DDPMScheduler',
            'PNDMScheduler',
            'HeunDiscreteScheduler',
            'EulerAncestralDiscreteScheduler',
            'KDPM2DiscreteScheduler',
            'KDPM2AncestralDiscreteScheduler',
            'DPMSolverSDEScheduler',
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # sigma-free schedulers are not supported here
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name )
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config )
            outputs.append(pipe(**inputs )[0] )
        assert check_same_shape(outputs )
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_latent_upscaler_fp16( self ):
        generator = torch.manual_seed(33 )
        pipe = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4', torch_dtype=torch.float16 )
        pipe.to('cuda' )
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler', torch_dtype=torch.float16 )
        upscaler.to('cuda' )
        prompt = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
        low_res_latents = pipe(prompt, generator=generator, output_type='latent' ).images
        image = upscaler(
            prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0, generator=generator, output_type='np', ).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy' )
        assert np.abs((expected_image - image).mean() ) < 5e-2
    def test_latent_upscaler_fp16_image( self ):
        generator = torch.manual_seed(33 )
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler', torch_dtype=torch.float16 )
        upscaler.to('cuda' )
        prompt = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
        image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png' )
        upscaled_image = upscaler(
            prompt=prompt, image=image, num_inference_steps=20, guidance_scale=0, generator=generator, output_type='np', ).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy' )
        assert np.abs((expected_image - upscaled_image).max() ) < 5e-2
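# The @slow integration tests above are opt-in; they assume a CUDA machine and are
# typically enabled via the usual environment switch, e.g. (path illustrative):
#   RUN_SLOW=1 pytest tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py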
| 119 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase ):
    def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict( self ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = ImageGPTImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing, 'clusters' ) )
        self.assertTrue(hasattr(image_processing, 'do_resize' ) )
        self.assertTrue(hasattr(image_processing, 'size' ) )
        self.assertTrue(hasattr(image_processing, 'do_normalize' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42 )
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42} )
    def test_to_json_string( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        obj = json.loads(image_processor.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key] ) )
            else:
                self.assertEqual(obj[key], value )
    def test_to_json_file( self ):
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'image_processor.json' )
            image_processor_first.to_json_file(json_file_path )
            image_processor_second = self.image_processing_class.from_json_file(json_file_path ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key], value )
    def test_from_and_save_pretrained( self ):
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname )
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key], value )
    @unittest.skip('ImageGPT requires clusters at initialization' )
    def test_init_without_params( self ):
        pass
def prepare_images() -> list:
    dataset = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' )
    image1 = Image.open(dataset[4]['file'] )
    image2 = Image.open(dataset[5]['file'] )
    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorTest(unittest.TestCase ):
    @slow
    def test_image( self ):
        image_processing = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' )
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0], return_tensors='pt' )
        self.assertIsInstance(encoding.input_ids, torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape, (1, 1024) )
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids )
        # test batched
        encoding = image_processing(images, return_tensors='pt' )
        self.assertIsInstance(encoding.input_ids, torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape, (2, 1024) )
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids )
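# Note (a sketch of the mechanism, not asserted by the tests above): ImageGPT
# quantizes each pixel against the `clusters` colour codebook, so `input_ids`
# are cluster indices in [0, n_clusters), not raw pixel values.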
| 119 | 1 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _large_tokenizer( self ):
        '''simple docstring'''
        return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        return ("This is a test", "This is a test")
    def test_convert_token_and_id( self ):
        '''simple docstring'''
        token = '''</s>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<pad>''' )
        self.assertEqual(vocab_keys[1] , '''</s>''' )
        self.assertEqual(vocab_keys[-1] , '''v''' )
        self.assertEqual(len(vocab_keys ) , 1103 )
    def test_vocab_size( self ):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
    def test_mask_tokens_rust_pegasus( self ):
        '''simple docstring'''
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
            ''' </s> <pad> <pad> <pad>'''
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
    def test_large_mask_tokens( self ):
        '''simple docstring'''
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
    def test_large_tokenizer_settings( self ):
        '''simple docstring'''
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = '''To ensure a smooth flow of bank resolutions.'''
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
    @require_torch
    def test_seq2seq_max_length( self ):
        '''simple docstring'''
        src_texts = ['''This is going to be way too long.''' * 150, '''short example''']
        tgt_texts = ['''not super long but more than 5 tokens''', '''tiny''']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='''pt''' )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='''pt''' )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
@slow
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[Any] = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB , offset=0 , mask_token_sent=None , mask_token='''[MASK]''' )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _large_tokenizer( self ):
        '''simple docstring'''
        return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus( self ):
        '''simple docstring'''
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
            ''' <pad> <pad> <pad>'''
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
    @require_torch
    def test_large_seq2seq_truncation( self ):
        '''simple docstring'''
        src_texts = ['''This is going to be way too long.''' * 1000, '''short example''']
        tgt_texts = ['''not super long but more than 5 tokens''', '''tiny''']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='''pt''' )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='''pt''' )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer( self ):
        '''simple docstring'''
        test_str = (
            '''This is an example string that is used to test the original TF implementation against the HF'''
            ''' implementation'''
        )
        token_ids = self._large_tokenizer(test_str ).input_ids
        self.assertListEqual(
            token_ids , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
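    # Note (hedged, for orientation): Pegasus' `offset` shifts every raw SentencePiece
    # id upward so the low id range stays free for <pad>, </s> and the <mask_*> /
    # <unk_token_*> placeholders; the BigBird-Pegasus variant above opts out with
    # offset=0 and a plain [MASK] token instead.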
| 350 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = """sew-d"""
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , squeeze_factor=2 , max_position_embeddings=512 , position_buckets=256 , share_att_key=True , relative_attention=True , pos_att_type=("p2c", "c2p") , norm_rel_ebd="layer_norm" , hidden_act="gelu_python" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , initializer_range=0.0_2 , layer_norm_eps=1e-7 , feature_layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , apply_spec_augment=True , mask_time_prob=0.0_5 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type )
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
    @property
    def inputs_to_logits_ratio( self ):
        '''simple docstring'''
        return functools.reduce(operator.mul , self.conv_stride , 1 )
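# Quick sanity sketch (assumes a local `transformers` install): with the default
# conv_stride above, raw audio is downsampled by 5*2*1*2*1*2*1*2*1*2*1*2*1 = 320x.
if __name__ == "__main__":
    assert SEWDConfig().inputs_to_logits_ratio == 320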
| 291 | 0 |
from math import factorial, pi
def maclaurin_sin ( theta : float , accuracy : int = 30 ):
    """simple docstring"""
    if not isinstance(theta , (int, float) ):
        raise ValueError('''maclaurin_sin() requires either an int or float for theta''' )
    if not isinstance(accuracy , int ) or accuracy <= 0:
        raise ValueError('''maclaurin_sin() requires a positive int for accuracy''' )
    theta = float(theta )
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(accuracy ) )
def maclaurin_cos ( theta : float , accuracy : int = 30 ):
    """simple docstring"""
    if not isinstance(theta , (int, float) ):
        raise ValueError('''maclaurin_cos() requires either an int or float for theta''' )
    if not isinstance(accuracy , int ) or accuracy <= 0:
        raise ValueError('''maclaurin_cos() requires a positive int for accuracy''' )
    theta = float(theta )
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(accuracy ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
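    # A quick numerical cross-check (a sketch): the truncated series should agree
    # with math.sin / math.cos to within floating-point tolerance at 30 terms.
    from math import cos, sin
    assert abs(maclaurin_sin(1.23) - sin(1.23)) < 1e-9
    assert abs(maclaurin_cos(1.23) - cos(1.23)) < 1e-9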
| 146 |
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__( self ):
        '''simple docstring'''
        self.graph = {}
    def add_pair( self , u , v , w=1 ):
        '''simple docstring'''
        if self.graph.get(u ):
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v ):
            self.graph[v] = []
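    # Usage sketch (comment-only; a runnable demo appears at the end of this module):
    #   g = DirectedGraph(); g.add_pair(0, 1); g.add_pair(1, 2)
    #   g.dfs(0)  ->  [0, 1, 2]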
    def all_nodes( self ):
        '''simple docstring'''
        return list(self.graph )
    def remove_pair( self , u , v ):
        '''simple docstring'''
        if self.graph.get(u ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_ )
    def dfs( self , s=-2 , d=-1 ):
        '''simple docstring'''
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(d )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
            else:
                s = ss
            # check if se have reached the starting point
            if len(stack ) == 0:
                return visited
    def fill_graph( self , c=-1 ):
        '''simple docstring'''
        if c == -1:
            c = floor(random() * 10000 ) + 10
        for i in range(c ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102 ) + 1 ):
                n = floor(random() * c ) + 1
                if n != i:
                    self.add_pair(i , n , 1 )
    def bfs( self , s=-2 ):
        '''simple docstring'''
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        d.append(s )
        visited.append(s )
        while d:
            s = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited
    def in_degree( self , u ):
        '''simple docstring'''
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count
    def out_degree( self , u ):
        '''simple docstring'''
        return len(self.graph[u] )
    def topological_sort( self , s=-2 ):
        '''simple docstring'''
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        ss = s
        sorted_nodes = []
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop() )
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
            else:
                s = ss
            # check if se have reached the starting point
            if len(stack ) == 0:
                return sorted_nodes
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = []
UpperCamelCase__ : int = []
UpperCamelCase__ : List[Any] = list(self.graph )[0]
stack.append(lowerCamelCase__ )
visited.append(lowerCamelCase__ )
UpperCamelCase__ : Dict = -2
UpperCamelCase__ : int = []
UpperCamelCase__ : Tuple = s
UpperCamelCase__ : str = False
UpperCamelCase__ : Optional[int] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ : Dict = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase__ : Union[str, Any] = len(lowerCamelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase__ : Optional[int] = True
if len(lowerCamelCase__ ) != 0:
UpperCamelCase__ : List[Any] = stack[len(lowerCamelCase__ ) - 1]
else:
UpperCamelCase__ : Optional[Any] = False
indirect_parents.append(lowerCamelCase__ )
UpperCamelCase__ : Optional[int] = s
UpperCamelCase__ : Optional[Any] = ss
# check if se have reached the starting point
if len(lowerCamelCase__ ) == 0:
return list(lowerCamelCase__ )
def UpperCAmelCase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : Any = []
UpperCamelCase__ : Tuple = list(self.graph )[0]
stack.append(lowerCamelCase__ )
visited.append(lowerCamelCase__ )
UpperCamelCase__ : int = -2
UpperCamelCase__ : Optional[int] = []
UpperCamelCase__ : Tuple = s
UpperCamelCase__ : List[str] = False
UpperCamelCase__ : Tuple = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ : Any = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase__ : List[str] = len(lowerCamelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ : int = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase__ : List[str] = True
if len(lowerCamelCase__ ) != 0:
UpperCamelCase__ : Optional[Any] = stack[len(lowerCamelCase__ ) - 1]
else:
UpperCamelCase__ : List[str] = False
indirect_parents.append(lowerCamelCase__ )
UpperCamelCase__ : Tuple = s
UpperCamelCase__ : List[Any] = ss
# check if se have reached the starting point
if len(lowerCamelCase__ ) == 0:
return False
    def dfs_time( self , s=-2 , e=-1 ):
        '''simple docstring'''
        begin = time()
        self.dfs(s , e )
        end = time()
        return end - begin
    def bfs_time( self , s=-2 ):
        '''simple docstring'''
        begin = time()
        self.bfs(s )
        end = time()
        return end - begin
class Graph:
    def __init__( self ):
        '''simple docstring'''
        self.graph = {}
    def add_pair( self , u , v , w=1 ):
        '''simple docstring'''
        if self.graph.get(u ):
            # if there already is a edge
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v ):
            # if there already is a edge
            if self.graph[v].count([w, u] ) == 0:
                self.graph[v].append([w, u] )
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]
    def remove_pair( self , u , v ):
        '''simple docstring'''
        if self.graph.get(u ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_ )
        # the other way round
        if self.graph.get(v ):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_ )
def UpperCAmelCase__ ( self : int , lowerCamelCase__ : Tuple=-2 , lowerCamelCase__ : Tuple=-1 ) -> str:
'''simple docstring'''
if s == d:
return []
UpperCamelCase__ : List[str] = []
UpperCamelCase__ : Tuple = []
if s == -2:
UpperCamelCase__ : str = list(self.graph )[0]
stack.append(lowerCamelCase__ )
visited.append(lowerCamelCase__ )
UpperCamelCase__ : int = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ : int = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCamelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCamelCase__ ) != 0:
UpperCamelCase__ : Optional[Any] = stack[len(lowerCamelCase__ ) - 1]
else:
UpperCamelCase__ : List[str] = ss
# check if se have reached the starting point
if len(lowerCamelCase__ ) == 0:
return visited
def UpperCAmelCase__ ( self : Dict , lowerCamelCase__ : Optional[int]=-1 ) -> Optional[Any]:
'''simple docstring'''
if c == -1:
UpperCamelCase__ : List[Any] = floor(random() * 10000 ) + 10
for i in range(lowerCamelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
UpperCamelCase__ : str = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCamelCase__ , lowerCamelCase__ , 1 )
def UpperCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : int=-2 ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : List[Any] = deque()
UpperCamelCase__ : int = []
if s == -2:
UpperCamelCase__ : Dict = list(self.graph )[0]
d.append(lowerCamelCase__ )
visited.append(lowerCamelCase__ )
while d:
UpperCamelCase__ : List[str] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCAmelCase__ ( self : int , lowerCamelCase__ : str ) -> List[Any]:
'''simple docstring'''
return len(self.graph[u] )
def UpperCAmelCase__ ( self : Dict ) -> int:
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = []
UpperCamelCase__ : Tuple = []
UpperCamelCase__ : str = list(self.graph )[0]
stack.append(lowerCamelCase__ )
visited.append(lowerCamelCase__ )
UpperCamelCase__ : Dict = -2
UpperCamelCase__ : Optional[Any] = []
UpperCamelCase__ : Optional[int] = s
UpperCamelCase__ : int = False
UpperCamelCase__ : str = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ : Tuple = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase__ : Optional[int] = len(lowerCamelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase__ : Optional[Any] = True
if len(lowerCamelCase__ ) != 0:
UpperCamelCase__ : List[str] = stack[len(lowerCamelCase__ ) - 1]
else:
UpperCamelCase__ : Optional[Any] = False
indirect_parents.append(lowerCamelCase__ )
UpperCamelCase__ : Optional[int] = s
UpperCamelCase__ : Dict = ss
# check if se have reached the starting point
if len(lowerCamelCase__ ) == 0:
return list(lowerCamelCase__ )
def UpperCAmelCase__ ( self : Any ) -> str:
'''simple docstring'''
UpperCamelCase__ : int = []
UpperCamelCase__ : str = []
UpperCamelCase__ : Optional[int] = list(self.graph )[0]
stack.append(lowerCamelCase__ )
visited.append(lowerCamelCase__ )
UpperCamelCase__ : Optional[int] = -2
UpperCamelCase__ : Union[str, Any] = []
UpperCamelCase__ : Optional[int] = s
UpperCamelCase__ : str = False
UpperCamelCase__ : Any = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase__ : Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase__ : Optional[Any] = len(lowerCamelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase__ : int = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase__ : Optional[Any] = True
if len(lowerCamelCase__ ) != 0:
UpperCamelCase__ : Optional[int] = stack[len(lowerCamelCase__ ) - 1]
else:
UpperCamelCase__ : Tuple = False
indirect_parents.append(lowerCamelCase__ )
UpperCamelCase__ : Union[str, Any] = s
UpperCamelCase__ : Dict = ss
# check if se have reached the starting point
if len(lowerCamelCase__ ) == 0:
return False
def UpperCAmelCase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
return list(self.graph )
def UpperCAmelCase__ ( self : List[str] , lowerCamelCase__ : Any=-2 , lowerCamelCase__ : str=-1 ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : List[str] = time()
self.dfs(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : Dict = time()
return end - begin
def UpperCAmelCase__ ( self : List[Any] , lowerCamelCase__ : str=-2 ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : List[str] = time()
self.bfs(lowerCamelCase__ )
UpperCamelCase__ : Any = time()
return end - begin
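# A small end-to-end sketch (not part of the original module): build a directed
# triangle and traverse it both depth- and breadth-first.
if __name__ == "__main__":
    dg = DirectedGraph()
    for u, v in [(0, 1), (1, 2), (2, 0)]:
        dg.add_pair(u , v )
    print(dg.dfs(0 ) )  # -> [0, 1, 2]
    print(dg.bfs(0 ) )  # -> [0, 1, 2]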
| 146 | 1 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None , metadata={"""help""": """Model type selected in the list: """ + """, """.join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
    max_seq_length: int = field(
        default=1_2_8 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    doc_stride: int = field(
        default=1_2_8 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , )
    max_query_length: int = field(
        default=6_4 , metadata={
            """help""": (
                """The maximum number of tokens for the question. Questions longer than this will """
                """be truncated to this length."""
            )
        } , )
    max_answer_length: int = field(
        default=3_0 , metadata={
            """help""": (
                """The maximum length of an answer that can be generated. This is needed because the start """
                """and end predictions are not conditioned on one another."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    version_2_with_negative: bool = field(
        default=False , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
    null_score_diff_threshold: float = field(
        default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    n_best_size: int = field(
        default=2_0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    lang_id: int = field(
        default=0 , metadata={
            """help""": (
                """language id of input for language-specific xlm models (see"""
                """ tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
            )
        } , )
    threads: int = field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} )
class Split(Enum ):
    train = """train"""
    dev = """dev"""
class SquadDataset(Dataset ):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
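# A minimal usage sketch for the dataset above; the tokenizer name and the data
# directory are illustrative assumptions, not part of the original file:
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     data_args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#     train_dataset = SquadDataset(data_args, tokenizer, mode=Split.train)
#     batch = train_dataset[0]  # dict of input_ids / attention_mask / token_type_ids tensors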
| 104 | 1 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # Note: this returns the module-level support_list alongside the answer.
    return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"
sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
    max_len = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
        top_p = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
        temp = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
        n_beams = None
# start main text
questions_list = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
question_s = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
        answers_st = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
disclaimer = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
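# The script above is a Streamlit app; assuming it is saved as `eli5_app.py`
# next to the `eli5_utils` helpers, with the FAISS memmap files and a local
# Elasticsearch instance available, it would be launched with:
#
#     streamlit run eli5_app.py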
| 38 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)

        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
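    # A quick demonstration of the automaton (keywords chosen for illustration):
    # each matched keyword maps to the list of its start indices in the text.
    automaton = Automaton(["he", "she", "his", "hers"])
    print(automaton.search_in("ahishers"))  # {'his': [1], 'she': [3], 'he': [4], 'hers': [4]}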
| 158 | 0 |
"""simple docstring"""
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value
def generate_roman_numerals(num: int) -> str:
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as file1:
        lines = file1.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)

    return savings
if __name__ == "__main__":
print(F'{solution() = }')
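    # Round-trip sanity check on one sample numeral (illustrative, not part of
    # the original solution): "MCMXC" parses to 1990, which renders minimally.
    assert parse_roman_numerals("MCMXC") == 1990
    assert generate_roman_numerals(1990) == "MCMXC"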
| 296 |
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
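# Example usage of the helpers above (the environment variable names below are
# illustrative, not mandated by this module):
#
#     os.environ["WORLD_SIZE"] = "4"
#     get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], 1)        # -> 4
#     parse_flag_from_env("SOME_DEBUG_FLAG")                     # -> False unless the variable is set truthy
#     parse_choice_from_env("SOME_CHOICE_VAR", "no")             # -> the raw string value, or "no"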
| 296 | 1 |
'''simple docstring'''
import datasets
_CITATION = "\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n"
_DESCRIPTION = "\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n"
_KWARGS_DESCRIPTION = "\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n 'accuracy': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n"
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 311 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"
def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
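# Worked example for the helper above: with pad_token_id=0 and
# decoder_start_token_id=0, shift_tokens_right(jnp.array([[5, 6, 7]]), 0, 0)
# yields [[0, 5, 6]]; any -100 label positions carried into the shifted ids are
# then replaced by the pad token id via the jnp.where call.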
class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
| 311 | 1 |
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
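# The fast class above only points `slow_tokenizer_class` at the custom slow
# tokenizer and otherwise reuses BertTokenizerFast unchanged. A hypothetical
# instantiation (checkpoint name is illustrative) would look like:
#
#     tok = CustomTokenizerFast.from_pretrained("bert-base-uncased")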
| 302 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
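# A minimal usage sketch; the checkpoint name and the `pil_image` input are
# illustrative assumptions, not part of the original file:
#
#     from transformers import ChineseCLIPProcessor
#
#     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#     inputs = processor(text=["一张猫的照片"], images=pil_image, return_tensors="pt")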
| 302 | 1 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
a : Dict = True
    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
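# Assuming the usual transformers test layout, the suite above would live at
# tests/models/nezha/test_modeling_nezha.py and be run with something like:
#
#     python -m pytest tests/models/nezha/test_modeling_nezha.py
#
# (the @slow tests only run when the RUN_SLOW=1 environment variable is set).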
| 114 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 114 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""

    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
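
    # For intuition, a single-process sketch of what pad_across_processes checks
    # above should produce (an assumption-level illustration, not accelerate
    # internals): each rank's tensor is zero-padded up to the largest first
    # dimension across ranks. The two local tensors below stand in for two ranks.
    #
    #   import torch
    #   rank0, rank1 = torch.ones(2, 10), torch.ones(3, 10)
    #   max_dim = max(rank0.shape[0], rank1.shape[0])
    #   padded0 = torch.zeros(max_dim, 10)
    #   padded0[: rank0.shape[0]] = rank0  # zeros stay at the end (pad_first=False)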
| 309 |
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = 1 - µA(x)
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), 1 - µB(x))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = µA(x) * µB(x)
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
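    # Sanity illustration: on a shared universe X, the fuzzy_or/fuzzy_and calls
    # above should reduce to elementwise max/min of the membership arrays
    # (an assumption about skfuzzy's behavior when both universes are identical):
    print(np.allclose(union, np.maximum(young, middle_aged)))
    print(np.allclose(intersection, np.minimum(young, middle_aged)))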
| 309 | 1 |
"""simple docstring"""
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1, end_point2):
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_ad_vectors_cross(ab, ac):
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector, accuracy):
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a, b, c, accuracy=10):
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_ad_vectors_cross(ab, ac), accuracy)
 | 96 |
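# Example for the collinearity helpers in the preceding snippet: three points on
# one line return True; nudging the third point off the line returns False.
assert are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))
assert not are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 3))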
import argparse
import os
import re
PATH_TO_TRANSFORMERS = 'src/transformers'

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(R'^(\s*)\S')
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(R'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(R'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(R'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(R'\[([^\]]+)\]')


def get_indent(line):
    """simple docstring"""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """simple docstring"""
    index = 0
    lines = code.split('\n')
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ['\n'.join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + ' '):
                current_block.append(lines[index])
                blocks.append('\n'.join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('\n'.join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append('\n'.join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append('\n'.join(lines[index:]))
    return blocks
def ignore_underscore(key):
    """simple docstring"""
    def _inner(x):
        return key(x).lower().replace('_', '')
    return _inner


def sort_objects(objects, key=None):
    """simple docstring"""
    def noop(x):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    """simple docstring"""
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return F'''[{imports}]'''
        keys = [part.strip().replace('"', '') for part in imports.split(',')]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split('\n')
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', '') for part in lines[1].split(',')]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ', '.join([F'''"{k}"''' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        return _re_bracket_content.sub(_replace, import_statement)
def sort_imports(file, check_only=True):
    """simple docstring"""
    with open(file, encoding='utf-8') as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt='_import_structure = {', end_prompt='if TYPE_CHECKING:')
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n')
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(sorted_block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(F'''Overwriting {file}.''')
            with open(file, 'w', encoding='utf-8') as f:
                f.write('\n'.join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, '__init__.py'), check_only=check_only)
            if result:
                failures += [os.path.join(root, '__init__.py')]
    if len(failures) > 0:
        raise ValueError(F'''Would overwrite {len(failures)} files, run `make style`.''')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
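    # Quick illustration of sort_objects' three-bucket ordering (constants first,
    # then classes, then functions, each sorted ignoring underscores); the names
    # below are made up for the example.
    example = ["load_tf_weights", "BertModel", "BERT_CONSTANT", "AutoConfig"]
    assert sort_objects(example) == ["BERT_CONSTANT", "AutoConfig", "BertModel", "load_tf_weights"]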
| 205 | 0 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    '''simple docstring'''
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
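# A triangle is an odd cycle and cannot be 2-colored, so the same check rejects it:
odd_cycle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(odd_cycle))  # False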
| 59 |
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) ,reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] ,)
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 59 | 1 |
'''simple docstring'''
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    """simple docstring"""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """simple docstring"""
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
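    # Worked example: for [2, 5, 3, 7, 11, 8, 10, 13, 6] one longest strictly
    # increasing subsequence is [2, 3, 7, 8, 10, 13], so the length is 6.
    print(longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]))  # 6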
 | 31 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    """simple docstring"""
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    """simple docstring"""
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    """simple docstring"""
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.2_5)
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true", )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
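    # Follow-up sketch: the helpers above can also be reused for plain predictions
    # (illustrative only; predicting on training data is not a real evaluation).
    data = load_iris()
    features, targets = data_handling(data)
    classifier = xgboost(features, targets)
    print(data["target_names"][classifier.predict(features[:1])[0]])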
| 31 | 1 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        """simple docstring"""
        return MegatronBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_megatron_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': MegatronBertModel,
'''fill-mask''': MegatronBertForMaskedLM,
'''question-answering''': MegatronBertForQuestionAnswering,
'''text-classification''': MegatronBertForSequenceClassification,
'''text-generation''': MegatronBertForCausalLM,
'''token-classification''': MegatronBertForTokenClassification,
'''zero-shot''': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__: Tuple = True
# test_resize_embeddings = False
lowercase__: Optional[int] = False
    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict

    def setUp(self):
        """simple docstring"""
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    """simple docstring"""
    return torch.tensor(
        tok_lst, dtype=torch.long, device=torch_device, )


TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
@slow
@unittest.skip("""Model is not available.""" )
    def test_inference_no_head(self):
        """simple docstring"""
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 10_24))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 13 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''')
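
# For intuition, set_recursively walks a dotted attribute path before assigning,
# e.g. (the key below is illustrative only):
#
#   set_recursively(hf_model, "encoder.layers.0.attention.k_proj", value, name, "weight")
#
# resolves hf_model.encoder.layers[0].attention.k_proj step by step via getattr
# (nn.Module exposes list entries under their string index) and then writes
# value into its .weight.data.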
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    """simple docstring"""
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.wav_encoder.wav_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1E-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"
    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """simple docstring"""
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()
    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=1_6000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    recursively_load_weights(model, hf_model, is_finetuned)
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 13 | 1 |
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time, burst_time, no_of_processes) -> list[int]:
    '''simple docstring'''
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Initialize remaining_time with the burst times.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes are not completed:
    # a process whose arrival time has passed
    # and that still has remaining execution time is put into ready_process;
    # the shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time, no_of_processes, waiting_time) -> list[int]:
    '''simple docstring'''
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print('[TEST CASE 01]')
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
for i, process_id in enumerate(list(range(1, 5))):
print(
f'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
f'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(f'''\nAverage waiting time = {mean(waiting_time):.5f}''')
    print(f'''Average turnaround time = {mean(turn_around_time):.5f}''')
 | 308 |
def sum_of_digits(n: int) -> int:
    '''simple docstring'''
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    '''simple docstring'''
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    '''simple docstring'''
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    '''simple docstring'''
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = F"""{func.__name__}({value})"""
        timing = timeit(F"""__main__.{call}""", setup='import __main__')
        print(F"""{call:56} = {func(value)} -- {timing:.4f} seconds""")

    for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
    benchmark()
 | 308 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
__A = ViTImageProcessor if is_vision_available() else None
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ (self ):
"""simple docstring"""
a = (3, 32, 128)
a = tempfile.mkdtemp()
# fmt: off
a = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
a = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + "\n" )
a = {
"do_normalize": False,
"do_resize": True,
"image_processor_type": "ViTImageProcessor",
"resample": 3,
"size": {"height": 32, "width": 128},
}
a = os.path.join(self.tmpdirname , lowerCamelCase_ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
def UpperCamelCase_ (self , **lowerCamelCase_ ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def UpperCamelCase_ (self , **lowerCamelCase_ ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
a = Image.fromarray(np.moveaxis(lowerCamelCase_ , 0 , -1 ) )
return image_input
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_tokenizer()
a = self.get_image_processor()
a = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
a = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase_ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , lowerCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_tokenizer()
a = self.get_image_processor()
a = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
a = self.get_image_processor(do_normalize=lowerCamelCase_ , padding_value=1.0 )
a = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCamelCase_ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , lowerCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
a = self.prepare_image_inputs()
a = image_processor(lowerCamelCase_ , return_tensors="np" )
a = processor(images=lowerCamelCase_ , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
a = "test"
a = processor(text=lowerCamelCase_ )
a = tokenizer(lowerCamelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
a = "test"
a = self.prepare_image_inputs()
a = processor(text=lowerCamelCase_ , images=lowerCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase_ ):
processor()
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
a = processor.char_decode(lowerCamelCase_ )
a = tokenizer.batch_decode(lowerCamelCase_ )
a = [seq.replace(" " , "" ) for seq in decoded_tok]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
a = None
a = self.prepare_image_inputs()
a = processor(text=lowerCamelCase_ , images=lowerCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = MgpstrProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
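        # simulated logits for the three decoding heads: character-level (38-token vocab),
        # BPE (GPT-2's 50257-token vocab) and WordPiece (BERT's 30522-token vocab)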
a = torch.randn(1 , 27 , 38 )
a = torch.randn(1 , 27 , 50257 )
a = torch.randn(1 , 27 , 30522 )
a = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
| 369 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowerCAmelCase )
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
__A = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True} )
__A = Features({"audio": Audio()} )
__A = Features({"transcription": Value("string" )} )
__A = "audio"
__A = "transcription"
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
if self.audio_column not in features:
raise ValueError(F'''Column {self.audio_column} is not present in features.''' )
if not isinstance(features[self.audio_column] , lowerCamelCase_ ):
raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' )
a = copy.deepcopy(self )
a = self.input_schema.copy()
a = features[self.audio_column]
a = input_schema
return task_template
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 71 | 0 |
'''simple docstring'''
def _A ( lowercase__ , lowercase__ ):
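    # logical left shift: append shift_amount zero bits, i.e. number << shift_amount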
if number < 0 or shift_amount < 0:
raise ValueError("""both inputs must be positive integers""" )
lowercase__ = str(bin(lowercase__ ) )
binary_number += "0" * shift_amount
return binary_number
def _A ( lowercase__ , lowercase__ ):
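    # logical right shift: drop the shift_amount lowest bits, i.e. number >> shift_amount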
if number < 0 or shift_amount < 0:
raise ValueError("""both inputs must be positive integers""" )
lowercase__ = str(bin(lowercase__ ) )[2:]
if shift_amount >= len(lowercase__ ):
return "0b0"
lowercase__ = binary_number[: len(lowercase__ ) - shift_amount]
return "0b" + shifted_binary_number
def _A ( lowercase__ , lowercase__ ):
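    # arithmetic right shift: shift the two's-complement representation, replicating the sign bit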
if number >= 0: # Get binary representation of positive number
lowercase__ = """0""" + str(bin(lowercase__ ) ).strip("""-""" )[2:]
else: # Get binary (2's complement) representation of negative number
lowercase__ = len(bin(lowercase__ )[3:] ) # Find 2's complement of number
lowercase__ = bin(abs(lowercase__ ) - (1 << binary_number_length) )[3:]
lowercase__ = (
"""1""" + """0""" * (binary_number_length - len(lowercase__ )) + binary_number
)
if shift_amount >= len(lowercase__ ):
return "0b" + binary_number[0] * len(lowercase__ )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(lowercase__ ) - shift_amount]
)
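# Examples (sketches): a logical left shift of 0b1 by 3 yields "0b1000";
# a logical right shift of 0b1000 by 3 yields "0b1".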
if __name__ == "__main__":
import doctest
doctest.testmod()
| 164 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__A = False
class A ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase__ = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe(
image=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 164 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = CycleDiffusionPipeline
A_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
A_ = PipelineTesterMixin.required_optional_params - {"latents"}
A_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
A_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
A_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
__a : Dict = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , num_train_timesteps=1000 , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
__a : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__a : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__a : int = CLIPTextModel(__a )
__a : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__a : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __UpperCAmelCase ( self , __a , __a=0 ):
'''simple docstring'''
__a : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
__a : Optional[int] = image / 2 + 0.5
if str(__a ).startswith('mps' ):
__a : Dict = torch.manual_seed(__a )
else:
__a : Dict = torch.Generator(device=__a ).manual_seed(__a )
__a : Optional[Any] = {
'prompt': 'An astronaut riding an elephant',
'source_prompt': 'An astronaut riding a horse',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'eta': 0.1,
'strength': 0.8,
'guidance_scale': 3,
'source_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
__a : Tuple = self.get_dummy_components()
__a : int = CycleDiffusionPipeline(**__a )
__a : str = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__a : int = self.get_dummy_inputs(__a )
__a : Any = pipe(**__a )
__a : List[str] = output.images
__a : List[str] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__a : Optional[Any] = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.get_dummy_components()
for name, module in components.items():
if hasattr(__a , 'half' ):
__a : Dict = module.half()
__a : Optional[int] = CycleDiffusionPipeline(**__a )
__a : str = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__a : Dict = self.get_dummy_inputs(__a )
__a : Any = pipe(**__a )
__a : int = output.images
__a : Tuple = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__a : Any = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __UpperCAmelCase ( self ):
'''simple docstring'''
return super().test_save_load_local()
@unittest.skip('non-deterministic pipeline' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
return super().test_inference_batch_single_identical()
@skip_mps
def __UpperCAmelCase ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __UpperCAmelCase ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def __UpperCAmelCase ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
__a : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy' )
__a : Optional[int] = init_image.resize((512, 512) )
__a : List[Any] = 'CompVis/stable-diffusion-v1-4'
__a : List[str] = DDIMScheduler.from_pretrained(__a , subfolder='scheduler' )
__a : Optional[Any] = CycleDiffusionPipeline.from_pretrained(
__a , scheduler=__a , safety_checker=__a , torch_dtype=torch.floataa , revision='fp16' )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__a : Union[str, Any] = 'A black colored car'
__a : int = 'A blue colored car'
__a : int = torch.manual_seed(0 )
__a : List[Any] = pipe(
prompt=__a , source_prompt=__a , image=__a , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__a , output_type='np' , )
__a : List[Any] = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
__a : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy' )
__a : Optional[Any] = init_image.resize((512, 512) )
__a : int = 'CompVis/stable-diffusion-v1-4'
__a : Tuple = DDIMScheduler.from_pretrained(__a , subfolder='scheduler' )
__a : Tuple = CycleDiffusionPipeline.from_pretrained(__a , scheduler=__a , safety_checker=__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__a : Union[str, Any] = 'A black colored car'
__a : Optional[Any] = 'A blue colored car'
__a : Optional[int] = torch.manual_seed(0 )
__a : Optional[Any] = pipe(
prompt=__a , source_prompt=__a , image=__a , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__a , output_type='np' , )
__a : Tuple = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 358 |
'''simple docstring'''
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ):
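    # an integer is even iff its least-significant bit is 0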
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 294 | 0 |
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( __snake_case : list[int] , __snake_case : int ) -> list[list[int]]:
__A : list[list[int]] = []
__A : list[int] = []
__A : Optional[int] = 0
__A : Union[str, Any] = sum(__snake_case )
create_state_space_tree(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
return result
def _lowerCAmelCase ( __snake_case : list[int] , __snake_case : int , __snake_case : int , __snake_case : list[int] , __snake_case : list[list[int]] , __snake_case : int , ) -> None:
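    # prune this branch when the running sum already exceeds max_sum or the
    # remaining numbers can no longer raise it to max_sum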
if sum(__snake_case ) > max_sum or (remaining_nums_sum + sum(__snake_case )) < max_sum:
return
if sum(__snake_case ) == max_sum:
result.append(__snake_case )
return
for index in range(__snake_case , len(__snake_case ) ):
create_state_space_tree(
__snake_case , __snake_case , index + 1 , [*path, nums[index]] , __snake_case , remaining_nums_sum - nums[index] , )
lowercase__ : int = [3, 34, 4, 12, 5, 2]
lowercase__ : Optional[Any] = 9
lowercase__ : str = generate_sum_of_subsets_soln(nums, max_sum)
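# With nums = [3, 34, 4, 12, 5, 2] and max_sum = 9, the subsets found are [3, 4, 2] and [4, 5].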
print(*result)
| 190 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def _lowerCAmelCase ( __snake_case : str , __snake_case : complex , __snake_case : str = "x" , __snake_case : float = 10**-10 , __snake_case : int = 1 , ) -> complex:
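    # a multiplicity m > 1 applies the modified Newton step x - m * f(x) / f'(x),
    # which restores quadratic convergence at a root of multiplicity m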
__A : int = symbols(__snake_case )
__A : Tuple = lambdify(__snake_case , __snake_case )
__A : Any = lambdify(__snake_case , diff(__snake_case , __snake_case ) )
__A : str = starting_point
while True:
if diff_function(__snake_case ) != 0:
__A : Optional[Any] = prev_guess - multiplicity * func(__snake_case ) / diff_function(
__snake_case )
else:
raise ZeroDivisionError('Could not find root' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
__A : Dict = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""") | 190 | 1 |
def a_ ( _lowercase ):
if not numbers:
return 0
if not isinstance(_lowercase , (list, tuple) ) or not all(
isinstance(_lowercase , _lowercase ) for number in numbers ):
raise ValueError('''numbers must be an iterable of integers''' )
_UpperCamelCase : Union[str, Any] = numbers[0]
for i in range(1 , len(_lowercase ) ):
# update the maximum and minimum subarray products
_UpperCamelCase : Dict = numbers[i]
if number < 0:
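            # a negative factor swaps the roles of the running maximum and minimum products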
            _UpperCamelCase , _UpperCamelCase : str = min_till_now, max_till_now
_UpperCamelCase : str = max(_lowercase , max_till_now * number )
_UpperCamelCase : Tuple = min(_lowercase , min_till_now * number )
# update the maximum product found till now
_UpperCamelCase : str = max(_lowercase , _lowercase )
return max_prod
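# Example (a sketch): for [2, 3, -2, 4] the maximum subarray product is 6, from the prefix [2, 3].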
| 360 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
| 128 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[str] ,lowercase_ : Tuple ,lowercase_ : int=7 ,lowercase_ : List[str]=3 ,lowercase_ : Any=3_0 ,lowercase_ : int=4_0_0 ,lowercase_ : List[str]=True ,lowercase_ : Tuple=None ,lowercase_ : int=0.9 ,lowercase_ : str=None ,lowercase_ : List[str]=True ,lowercase_ : str=[0.5, 0.5, 0.5] ,lowercase_ : Optional[Any]=[0.5, 0.5, 0.5] ,):
lowerCAmelCase__ : Optional[int] = size if size is not None else {'''shortest_edge''': 3_0}
lowerCAmelCase__ : int = crop_size if crop_size is not None else {'''height''': 3_0, '''width''': 3_0}
lowerCAmelCase__ : List[str] = parent
lowerCAmelCase__ : Any = batch_size
lowerCAmelCase__ : Union[str, Any] = num_channels
lowerCAmelCase__ : List[str] = min_resolution
lowerCAmelCase__ : int = max_resolution
lowerCAmelCase__ : Any = do_resize_and_center_crop
lowerCAmelCase__ : Tuple = size
lowerCAmelCase__ : List[str] = crop_pct
lowerCAmelCase__ : Union[str, Any] = crop_size
lowerCAmelCase__ : Any = do_normalize
lowerCAmelCase__ : int = image_mean
lowerCAmelCase__ : List[Any] = image_std
def __lowerCAmelCase ( self : Dict ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( a_ , unittest.TestCase ):
"""simple docstring"""
lowercase__ = PoolFormerImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self : int ):
lowerCAmelCase__ : str = PoolFormerImageProcessingTester(self )
@property
def __lowerCAmelCase ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self : Tuple ):
lowerCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ ,'''do_resize_and_center_crop''' ) )
self.assertTrue(hasattr(lowercase_ ,'''size''' ) )
self.assertTrue(hasattr(lowercase_ ,'''crop_pct''' ) )
self.assertTrue(hasattr(lowercase_ ,'''do_normalize''' ) )
self.assertTrue(hasattr(lowercase_ ,'''image_mean''' ) )
self.assertTrue(hasattr(lowercase_ ,'''image_std''' ) )
def __lowerCAmelCase ( self : str ):
lowerCAmelCase__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 3_0} )
self.assertEqual(image_processor.crop_size ,{'''height''': 3_0, '''width''': 3_0} )
lowerCAmelCase__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=4_2 ,crop_size=8_4 )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 4_2} )
self.assertEqual(image_processor.crop_size ,{'''height''': 8_4, '''width''': 8_4} )
def __lowerCAmelCase ( self : List[Any] ):
pass
def __lowerCAmelCase ( self : Optional[int] ):
# Initialize image_processing
lowerCAmelCase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ : Tuple = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ ,Image.Image )
# Test not batched input
lowerCAmelCase__ : Dict = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
lowerCAmelCase__ : Optional[Any] = image_processing(lowercase_ ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def __lowerCAmelCase ( self : Tuple ):
# Initialize image_processing
lowerCAmelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase_ ,numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ ,np.ndarray )
# Test not batched input
lowerCAmelCase__ : Optional[Any] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
lowerCAmelCase__ : Any = image_processing(lowercase_ ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def __lowerCAmelCase ( self : Optional[int] ):
# Initialize image_processing
lowerCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase_ ,torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ ,torch.Tensor )
# Test not batched input
lowerCAmelCase__ : List[str] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
lowerCAmelCase__ : List[str] = image_processing(lowercase_ ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
| 106 | """simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : Optional[int] = """ylacombe/bark-small"""
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : int = """en_speaker_1"""
_lowerCAmelCase : List[Any] = """This is a test string"""
_lowerCAmelCase : Any = """speaker_embeddings_path.json"""
_lowerCAmelCase : List[Any] = """speaker_embeddings"""
def __A ( self , **a__ ):
return AutoTokenizer.from_pretrained(self.checkpoint , **a__ )
def __A ( self ):
shutil.rmtree(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.get_tokenizer()
_lowerCAmelCase : int = BarkProcessor(tokenizer=a__ )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : str = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __A ( self ):
_lowerCAmelCase : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_lowerCAmelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase : List[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __A ( self ):
_lowerCAmelCase : List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_lowerCAmelCase : Union[str, Any] = 35
_lowerCAmelCase : Union[str, Any] = 2
_lowerCAmelCase : Optional[int] = 8
_lowerCAmelCase : Dict = {
"""semantic_prompt""": np.ones(a__ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_lowerCAmelCase : Dict = processor(text=self.input_string , voice_preset=a__ )
_lowerCAmelCase : Tuple = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a__ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(a__ , **a__ )
_lowerCAmelCase : List[Any] = processor(text=self.input_string , voice_preset=a__ )
_lowerCAmelCase : Optional[int] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a__ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_lowerCAmelCase : str = processor(text=self.input_string , voice_preset=self.voice_preset )
def __A ( self ):
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : List[Any] = BarkProcessor(tokenizer=a__ )
_lowerCAmelCase : Dict = processor(text=self.input_string )
_lowerCAmelCase : Tuple = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=a__ , return_attention_mask=a__ , return_token_type_ids=a__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 44 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCamelCase ( self : str ):
snake_case__ : str = AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" , return_dict=snake_case_ ).to(snake_case_ )
snake_case__ : int = AutoTokenizer.from_pretrained("""google/mt5-small""" )
snake_case__ : List[str] = tokenizer("""Hello there""" , return_tensors="""pt""" ).input_ids
snake_case__ : Optional[Any] = tokenizer("""Hi I am""" , return_tensors="""pt""" ).input_ids
snake_case__ : Union[str, Any] = model(input_ids.to(snake_case_ ) , labels=labels.to(snake_case_ ) ).loss
snake_case__ : str = -(labels.shape[-1] * loss.item())
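        # the loss is the mean per-token NLL, so scaling by the sequence length recovers the sequence log-likelihood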
snake_case__ : Any = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 43 |
'''simple docstring'''
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCAmelCase_ ( _a , unittest.TestCase ):
"""simple docstring"""
lowercase = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def lowerCamelCase ( self : Union[str, Any] , snake_case_ : Tuple=0 ):
snake_case__ : Any = floats_tensor((1, 3, 128, 128) , rng=random.Random(snake_case_ ) )
snake_case__ : List[str] = np.random.RandomState(snake_case_ )
snake_case__ : Optional[int] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""strength""": 0.75,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase ( self : Optional[Any] ):
snake_case__ : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=snake_case_ )
snake_case__ : Tuple = self.get_dummy_inputs()
snake_case__ : Union[str, Any] = pipe(**snake_case_ ).images
snake_case__ : List[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
snake_case__ : int = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def lowerCamelCase ( self : Dict ):
snake_case__ : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
snake_case__ : int = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
snake_case__ : Dict = self.get_dummy_inputs()
snake_case__ : int = pipe(**snake_case_ ).images
snake_case__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case__ : Tuple = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
snake_case__ : Dict = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case_ )
# warmup pass to apply optimizations
snake_case__ : List[Any] = pipe(**self.get_dummy_inputs() )
snake_case__ : List[str] = self.get_dummy_inputs()
snake_case__ : Optional[int] = pipe(**snake_case_ ).images
snake_case__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case__ : Any = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowerCamelCase ( self : str ):
snake_case__ : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
snake_case__ : Dict = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case_ )
snake_case__ : Union[str, Any] = self.get_dummy_inputs()
snake_case__ : List[Any] = pipe(**snake_case_ ).images
snake_case__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case__ : Optional[Any] = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowerCamelCase ( self : str ):
snake_case__ : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
snake_case__ : List[Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case_ )
snake_case__ : Tuple = self.get_dummy_inputs()
snake_case__ : Tuple = pipe(**snake_case_ ).images
snake_case__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case__ : int = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowerCamelCase ( self : Dict ):
snake_case__ : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
snake_case__ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case_ )
snake_case__ : List[str] = self.get_dummy_inputs()
snake_case__ : List[str] = pipe(**snake_case_ ).images
snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case__ : List[str] = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@property
def lowerCamelCase ( self : Dict ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCamelCase ( self : Dict ):
snake_case__ : Tuple = ort.SessionOptions()
snake_case__ : Optional[Any] = False
return options
def lowerCamelCase ( self : List[str] ):
snake_case__ : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
snake_case__ : str = init_image.resize((768, 512) )
# using the PNDM scheduler by default
snake_case__ : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case_ )
snake_case__ : Dict = """A fantasy landscape, trending on artstation"""
snake_case__ : str = np.random.RandomState(0 )
snake_case__ : Union[str, Any] = pipe(
prompt=snake_case_ , image=snake_case_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case_ , output_type="""np""" , )
snake_case__ : str = output.images
snake_case__ : Optional[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case__ : Optional[Any] = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def lowerCamelCase ( self : int ):
snake_case__ : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
snake_case__ : List[Any] = init_image.resize((768, 512) )
snake_case__ : Tuple = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
snake_case__ : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=snake_case_ , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case_ )
snake_case__ : Union[str, Any] = """A fantasy landscape, trending on artstation"""
snake_case__ : Optional[int] = np.random.RandomState(0 )
snake_case__ : Optional[int] = pipe(
prompt=snake_case_ , image=snake_case_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=snake_case_ , output_type="""np""" , )
snake_case__ : Any = output.images
snake_case__ : Tuple = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case__ : Tuple = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 43 | 1 |
from __future__ import annotations
from math import pi, sqrt
def a__ ( A_, A_ ):
'''simple docstring'''
if inductance <= 0:
raise ValueError("""Inductance cannot be 0 or negative""" )
elif capacitance <= 0:
raise ValueError("""Capacitance cannot be 0 or negative""" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
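# Example (a sketch): with inductance 10e-3 H and capacitance 10e-6 F the function
# above returns ("Resonant frequency", ~503.29).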
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class A ( _UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase = 'open-llama'
def __init__( self : Any,lowercase_ : Optional[int]=1_0_0_0_0_0,lowercase_ : Union[str, Any]=4_0_9_6,lowercase_ : Dict=1_1_0_0_8,lowercase_ : Dict=3_2,lowercase_ : Optional[int]=3_2,lowercase_ : Dict="silu",lowercase_ : Union[str, Any]=2_0_4_8,lowercase_ : Optional[int]=0.02,lowercase_ : Dict=1E-6,lowercase_ : Dict=True,lowercase_ : List[Any]=0,lowercase_ : Optional[int]=1,lowercase_ : str=2,lowercase_ : str=False,lowercase_ : str=True,lowercase_ : int=0.1,lowercase_ : List[Any]=0.1,lowercase_ : List[Any]=True,lowercase_ : Union[str, Any]=True,lowercase_ : Any=None,**lowercase_ : List[Any],)-> Tuple:
'''simple docstring'''
A__ = vocab_size
A__ = max_position_embeddings
A__ = hidden_size
A__ = intermediate_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = initializer_range
A__ = rms_norm_eps
A__ = use_cache
A__ = kwargs.pop(
'use_memorry_efficient_attention',lowercase_ )
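        # "memorry" is intentional here: it mirrors the misspelled key used by upstream Open-Llama checkpoints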
A__ = hidden_dropout_prob
A__ = attention_dropout_prob
A__ = use_stable_embedding
A__ = shared_input_output_embedding
A__ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowercase_,bos_token_id=lowercase_,eos_token_id=lowercase_,tie_word_embeddings=lowercase_,**lowercase_,)
def snake_case__ ( self : str )-> str:
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling,lowercase_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
F'got {self.rope_scaling}' )
A__ = self.rope_scaling.get('type',lowercase_ )
A__ = self.rope_scaling.get('factor',lowercase_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                F'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(lowercase_,lowercase_ ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
| 7 | 0 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class a :
@staticmethod
def A_ ( *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ):
pass
@is_pipeline_test
@require_torch
@require_vision
class a ( unittest.TestCase ):
snake_case_ = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def A_ ( self : List[str] , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : List[Any] ):
snake_case_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
snake_case_ = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def A_ ( self : List[Any] , lowercase_ : List[str] , lowercase_ : Optional[Any] ):
snake_case_ = vqa_pipeline(lowercase_ , top_k=1 )
self.assertEqual(
lowercase_ , [
[{'''score''': ANY(lowercase_ ), '''answer''': ANY(lowercase_ )}],
[{'''score''': ANY(lowercase_ ), '''answer''': ANY(lowercase_ )}],
] , )
@require_torch
def A_ ( self : Dict ):
snake_case_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
snake_case_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
snake_case_ = '''How many cats are there?'''
snake_case_ = vqa_pipeline(image=lowercase_ , question='''How many cats are there?''' , top_k=2 )
self.assertEqual(
lowercase_ , [{'''score''': ANY(lowercase_ ), '''answer''': ANY(lowercase_ )}, {'''score''': ANY(lowercase_ ), '''answer''': ANY(lowercase_ )}] )
snake_case_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
lowercase_ , [{'''score''': ANY(lowercase_ ), '''answer''': ANY(lowercase_ )}, {'''score''': ANY(lowercase_ ), '''answer''': ANY(lowercase_ )}] )
@slow
@require_torch
def A_ ( self : Dict ):
snake_case_ = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''' )
snake_case_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
snake_case_ = '''How many cats are there?'''
snake_case_ = vqa_pipeline(image=lowercase_ , question=lowercase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [{'''score''': 0.8799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
snake_case_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [{'''score''': 0.8799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
snake_case_ = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [[{'''score''': 0.8799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''' )
def A_ ( self : Optional[Any] ):
pass
| 72 |
'''simple docstring'''
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
a : Union[str, Any] = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
a : Any = importlib.util.spec_from_file_location(
'transformers',
os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
a : int = spec.loader.load_module()
a : Dict = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
a : str = re.compile('\[(.+?)\]\((https://huggingface\.co/.+?)\)')
a : str = {
'CLIPConfigMixin',
'DecisionTransformerConfigMixin',
'EncoderDecoderConfigMixin',
'RagConfigMixin',
'SpeechEncoderDecoderConfigMixin',
'VisionEncoderDecoderConfigMixin',
'VisionTextDualEncoderConfigMixin',
}
def __magic_name__ ( ) -> Any:
'''simple docstring'''
snake_case_ = []
for config_class in list(CONFIG_MAPPING.values() ):
snake_case_ = False
# source code of `config_class`
snake_case_ = inspect.getsource(__UpperCAmelCase )
snake_case_ = _re_checkpoint.findall(__UpperCAmelCase )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
snake_case_ ,snake_case_ = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
snake_case_ = F"https://huggingface.co/{ckpt_name}"
if ckpt_link == ckpt_link_from_name:
snake_case_ = True
break
snake_case_ = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
snake_case_ = '''\n'''.join(sorted(__UpperCAmelCase ) )
raise ValueError(F"The following configurations don't contain any valid checkpoint:\n{message}" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 72 | 1 |
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
__A = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , ) -> Union[str, Any]:
output_path.parent.mkdir(parents=__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=__SCREAMING_SNAKE_CASE , output_names=__SCREAMING_SNAKE_CASE , dynamic_axes=__SCREAMING_SNAKE_CASE , do_constant_folding=__SCREAMING_SNAKE_CASE , use_external_data_format=__SCREAMING_SNAKE_CASE , enable_onnx_checker=__SCREAMING_SNAKE_CASE , opset_version=__SCREAMING_SNAKE_CASE , )
else:
export(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=__SCREAMING_SNAKE_CASE , output_names=__SCREAMING_SNAKE_CASE , dynamic_axes=__SCREAMING_SNAKE_CASE , do_constant_folding=__SCREAMING_SNAKE_CASE , opset_version=__SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False ) -> List[Any]:
__lowerCAmelCase: Dict = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
__lowerCAmelCase: Any = "cuda"
elif fpaa and not torch.cuda.is_available():
raise ValueError("`float16` model export is only supported on GPUs with CUDA" )
else:
__lowerCAmelCase: List[str] = "cpu"
__lowerCAmelCase: Dict = StableDiffusionPipeline.from_pretrained(__SCREAMING_SNAKE_CASE , torch_dtype=__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Dict = Path(__SCREAMING_SNAKE_CASE )
# TEXT ENCODER
__lowerCAmelCase: Optional[int] = pipeline.text_encoder.config.max_position_embeddings
__lowerCAmelCase: Union[str, Any] = pipeline.text_encoder.config.hidden_size
__lowerCAmelCase: Dict = pipeline.tokenizer(
"A sample prompt" , padding="max_length" , max_length=pipeline.tokenizer.model_max_length , truncation=__SCREAMING_SNAKE_CASE , return_tensors="pt" , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=__SCREAMING_SNAKE_CASE , dtype=torch.intaa )) , output_path=output_path / "text_encoder" / "model.onnx" , ordered_input_names=["input_ids"] , output_names=["last_hidden_state", "pooler_output"] , dynamic_axes={
"input_ids": {0: "batch", 1: "sequence"},
} , opset=__SCREAMING_SNAKE_CASE , )
del pipeline.text_encoder
# UNET
__lowerCAmelCase: List[Any] = pipeline.unet.config.in_channels
__lowerCAmelCase: Any = pipeline.unet.config.sample_size
__lowerCAmelCase: Dict = output_path / "unet" / "model.onnx"
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).to(device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE ),
torch.randn(2 ).to(device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE ),
torch.randn(2 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).to(device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE ),
False,
) , output_path=__SCREAMING_SNAKE_CASE , ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"] , output_names=["out_sample"] , dynamic_axes={
"sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
"timestep": {0: "batch"},
"encoder_hidden_states": {0: "batch", 1: "sequence"},
} , opset=__SCREAMING_SNAKE_CASE , use_external_data_format=__SCREAMING_SNAKE_CASE , )
__lowerCAmelCase: Any = str(unet_path.absolute().as_posix() )
__lowerCAmelCase: str = os.path.dirname(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: int = onnx.load(__SCREAMING_SNAKE_CASE )
# clean up existing tensor files
shutil.rmtree(__SCREAMING_SNAKE_CASE )
os.mkdir(__SCREAMING_SNAKE_CASE )
# collate external tensor files into one
onnx.save_model(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , save_as_external_data=__SCREAMING_SNAKE_CASE , all_tensors_to_one_file=__SCREAMING_SNAKE_CASE , location="weights.pb" , convert_attribute=__SCREAMING_SNAKE_CASE , )
del pipeline.unet
# VAE ENCODER
__lowerCAmelCase: str = pipeline.vae
__lowerCAmelCase: str = vae_encoder.config.in_channels
__lowerCAmelCase: str = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
__lowerCAmelCase: Union[str, Any] = lambda __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : vae_encoder.encode(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )[0].sample()
onnx_export(
__SCREAMING_SNAKE_CASE , model_args=(
torch.randn(1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).to(device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE ),
False,
) , output_path=output_path / "vae_encoder" / "model.onnx" , ordered_input_names=["sample", "return_dict"] , output_names=["latent_sample"] , dynamic_axes={
"sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
} , opset=__SCREAMING_SNAKE_CASE , )
# VAE DECODER
__lowerCAmelCase: List[str] = pipeline.vae
__lowerCAmelCase: Optional[int] = vae_decoder.config.latent_channels
__lowerCAmelCase: List[str] = vae_decoder.config.out_channels
# forward only through the decoder part
__lowerCAmelCase: int = vae_encoder.decode
onnx_export(
__SCREAMING_SNAKE_CASE , model_args=(
torch.randn(1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).to(device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE ),
False,
) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={
"latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
} , opset=__SCREAMING_SNAKE_CASE , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
__lowerCAmelCase: List[Any] = pipeline.safety_checker
__lowerCAmelCase: str = safety_checker.config.vision_config.num_channels
__lowerCAmelCase: Optional[int] = safety_checker.config.vision_config.image_size
__lowerCAmelCase: Union[str, Any] = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ).to(device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE ),
torch.randn(1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).to(device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE ),
) , output_path=output_path / "safety_checker" / "model.onnx" , ordered_input_names=["clip_input", "images"] , output_names=["out_images", "has_nsfw_concepts"] , dynamic_axes={
"clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
"images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
} , opset=__SCREAMING_SNAKE_CASE , )
del pipeline.safety_checker
__lowerCAmelCase: Union[str, Any] = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker" )
__lowerCAmelCase: Union[str, Any] = pipeline.feature_extractor
else:
__lowerCAmelCase: Optional[int] = None
__lowerCAmelCase: Any = None
__lowerCAmelCase: Optional[int] = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder" ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / "unet" ) , scheduler=pipeline.scheduler , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(__SCREAMING_SNAKE_CASE )
print("ONNX pipeline saved to" , __SCREAMING_SNAKE_CASE )
del pipeline
del onnx_pipeline
__lowerCAmelCase: List[Any] = OnnxStableDiffusionPipeline.from_pretrained(__SCREAMING_SNAKE_CASE , provider="CPUExecutionProvider" )
print("ONNX pipeline is loadable" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
__A = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
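# Example invocation (script name and output directory are placeholders):
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 --output_path ./sd-onnx --opset 14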
| 217 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def a__ ( __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def a__ ( __SCREAMING_SNAKE_CASE ) -> str:
class snake_case :
def __init__( self : int , UpperCamelCase__ : Optional[int])-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: str = metric_id
class snake_case :
SCREAMING_SNAKE_CASE_ : List[Any] = [MetricMock(__snake_case ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]]
def lowercase_ ( self : Tuple)-> Union[str, Any]:
'''simple docstring'''
return self._metrics
monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
"func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
if "tmp_path" in args:
__lowerCAmelCase: Tuple = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
with pytest.warns(__SCREAMING_SNAKE_CASE , match="https://huggingface.co/docs/evaluate" ):
func(*__SCREAMING_SNAKE_CASE )
| 217 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class _lowerCamelCase ( __snake_case ):
UpperCAmelCase_ = 'xlm-roberta-xl'
def __init__(self , __a=25_08_80 , __a=25_60 , __a=36 , __a=32 , __a=1_02_40 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_14 , __a=1 , __a=0.02 , __a=1e-0_5 , __a=1 , __a=0 , __a=2 , __a="absolute" , __a=True , __a=None , **__a , ) -> Any:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_act
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = position_embedding_type
UpperCamelCase = use_cache
UpperCamelCase = classifier_dropout
class _lowerCamelCase ( __snake_case ):
@property
def snake_case_ (self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCamelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCamelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 362 |
"""simple docstring"""
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    """Return a min-max normalized copy of ``data`` rounded to ``ndigits``."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Return a z-score standardized copy of ``data`` rounded to ``ndigits``."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
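

# Illustrative usage (added for clarity, not part of the original module):
# min-max normalization maps the sample into [0, 1]; z-score standardization
# centers it at 0 with unit standard deviation.
if __name__ == "__main__":
    sample = [2.0, 4.0, 6.0, 8.0]
    print(normalization(sample))    # [0.0, 0.333, 0.667, 1.0]
    print(standardization(sample))  # values symmetric around 0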
| 244 | 0 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check whether the board has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper that extends the tour from ``pos`` via backtracking."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0  # backtrack

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour for a board of size ``n``."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
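
    # Illustrative demo (added, assumes the reconstruction above): a 5x5 board
    # admits an open knight's tour; each square holds its 1-based visit order.
    for row in open_knight_tour(5):
        print(row)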
| 326 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self) -> None:
        self.model_tester = FlaxBigBirdModelTester(self)
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
super().test_hidden_states_output()
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # The block-sparse attention in FlaxBigBird returns attention_probs = None,
        # so attention outputs cannot be compared against the PyTorch version.
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
| 326 | 1 |
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """Return the numerator of the reduced proper fraction immediately to the
    left of numerator/denominator among fractions with denominators <= limit."""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_00_00_00))
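    # Sanity check (illustrative addition): with limit=8 the fraction
    # immediately to the left of 3/7 is 2/5, so the numerator is 2.
    assert solution(3, 7, 8) == 2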
| 363 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
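
# Usage note (illustrative): with the lazy module installed in sys.modules,
# heavy backends are imported only on first attribute access, e.g.
#   from transformers.models.xlnet import XLNetConfig  # cheap, config only
#   from transformers.models.xlnet import XLNetModel   # pulls in torch lazily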
| 42 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 264 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Get a list where entries are [1] if a token is special (eos, pad, ...) else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs from a sequence by appending eos_token_id."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
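
# Minimal usage sketch (illustrative, requires access to the Hugging Face Hub):
# Pegasus appends EOS and adds no BOS, matching its pretraining format.
if __name__ == "__main__":
    tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
    ids = tokenizer("Summarize this.").input_ids
    print(ids[-1] == tokenizer.eos_token_id)  # True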
| 264 | 1 |
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, ya: float, xa: float, step_size: float, x_end: float
) -> np.ndarray:
    """Modified Euler (Heun) method: predict with an Euler step, then average
    the slopes at both ends of the interval."""
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
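
    # Illustrative check (added): for y' = y with y(0) = 1 the exact solution
    # is e^x, so the final value should be close to e ~ 2.71828.
    approx = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(approx[-1])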
| 343 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job posting
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 343 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
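

# Minimal usage sketch (illustrative addition): instantiate the default
# configuration and read attributes through the attribute_map remapping.
if __name__ == "__main__":
    config = CTRLConfig()
    print(config.hidden_size, config.num_hidden_layers)  # 1280 48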
| 195 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
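
# Illustrative example (hypothetical input, added for clarity): a doctest
# failure block maps the failing file to the first line of its error, e.g.
#   extract_first_line_failure("_ _ src/foo.py _ [doctest]\nValueError: bad")
#   -> {"src/foo.py": "ValueError: bad"}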
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

        # Thread timestamp of the Slack post, set by `post()`
        self.thread_ts = None

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
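
# Illustrative layout (hypothetical, added for clarity): if the working
# directory contains an unpacked artifact folder "doc_tests_gpu_test_reports",
# then retrieve_available_artifacts()["doc_tests_gpu_test_reports"].paths
# yields [{"name": "doc_tests_gpu_test_reports", "path": "doc_tests_gpu_test_reports"}].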
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure

                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 195 | 1 |
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below ``n`` (Project Euler 1)."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:
            # Note: this branch is unreachable, since multiples of 15 are
            # already caught by the condition above.
            result -= a
        a += 1
    return result
if __name__ == "__main__":
print(F'{solution() = }')
| 120 |
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
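
    # Illustrative examples (added): 28 is a perfect number, so the sum of its
    # proper divisors equals the number itself.
    print(sum_of_divisors(28))  # 28 (1 + 2 + 4 + 7 + 14)
    print(sum_of_divisors(12))  # 16 (1 + 2 + 3 + 4 + 6)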
| 120 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 55 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 38 | 0 |
'''simple docstring'''
import itertools
import math
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes numbers are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the prime numbers in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the ``nth`` prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(F'{solution() = }')
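    # Quick sanity check (added): the first six primes are 2, 3, 5, 7, 11, 13,
    # so the 6th prime should be 13.
    assert solution(6) == 13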
| 353 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processor_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a random uint8 image input for the processor tests."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
| 16 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : Optional[Any] = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[Any] = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[Any] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : int = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 50 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """
    The function helps in renaming embedding layer weights.

    Args:
        idx: stage number in the original model
    """
    embed = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx, cnt):
    """
    The function helps in renaming attention block layer weights.

    Args:
        idx: stage number in the original model
        cnt: count of blocks in each stage
    """
    attention_weights = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    # Map the classification-token parameter of a stage.
    token = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", 'stage2.cls_token') )
return token
def final():
    # Map the final layernorm and classifier-head weights.
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    # Fetch the ImageNet-1k id->label mapping from the Hub and build the HF config.
    img_labels_file = 'imagenet-1k-id2label.json'
    num_labels = 1000
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('/', 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2+2+20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
    image_processor.size['shortest_edge'] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device('cpu'))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    # Copy each tensor from the original checkpoint under its new HF key.
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=3_84,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=R"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
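    # Example invocation (hypothetical local paths), after downloading the
    # original checkpoint from the model zoo linked above:
    #
    #   python convert_cvt_checkpoint.py \
    #       --cvt_model cvt-w24 \
    #       --image_size 384 \
    #       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
    #       --pytorch_dump_folder_path ./cvt-w24-384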
| 50 | 1 |
import base64


def base64_encode(string: str) -> bytes:
    """Encode a UTF-8 string to base64-encoded bytes."""
    return base64.b64encode(string.encode('utf-8'))


def base64_decode(encoded: bytes) -> str:
    """Decode base64-encoded bytes back to a UTF-8 string."""
    return base64.b64decode(encoded).decode('utf-8')


if __name__ == "__main__":
    test = 'Hello World!'
    encoded = base64_encode(test)
    print(encoded)
    decoded = base64_decode(encoded)
    print(decoded)
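    # Round-trip sanity check (illustrative): decoding must reproduce the input.
    #   assert base64_decode(base64_encode('Hello World!')) == 'Hello World!'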
| 94 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    """Configuration class storing the architecture hyperparameters of a ViT-MSN model."""

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
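
# A minimal usage sketch (illustrative): instantiate the config with a couple
# of overrides and round-trip it through the standard PretrainedConfig
# serialization helpers.
#
#   config = ViTMSNConfig(image_size=192, patch_size=8)
#   config.save_pretrained('./vit-msn-config')
#   reloaded = ViTMSNConfig.from_pretrained('./vit-msn-config')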
| 94 | 1 |