from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
latent_dist: DiagonalGaussianDistribution
class AutoencoderKL(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__( self : Union[str, Any] , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 3 , UpperCamelCase__ : Tuple[str] = ("DownEncoderBlock2D",) , UpperCamelCase__ : Tuple[str] = ("UpDecoderBlock2D",) , UpperCamelCase__ : Tuple[int] = (64,) , UpperCamelCase__ : int = 1 , UpperCamelCase__ : str = "silu" , UpperCamelCase__ : int = 4 , UpperCamelCase__ : int = 32 , UpperCamelCase__ : int = 32 , UpperCamelCase__ : float = 0.1_8215 , ):
'''simple docstring'''
super().__init__()
# pass init params to Encoder
SCREAMING_SNAKE_CASE : Union[str, Any] = Encoder(
in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , down_block_types=UpperCamelCase__ , block_out_channels=UpperCamelCase__ , layers_per_block=UpperCamelCase__ , act_fn=UpperCamelCase__ , norm_num_groups=UpperCamelCase__ , double_z=UpperCamelCase__ , )
# pass init params to Decoder
SCREAMING_SNAKE_CASE : Optional[int] = Decoder(
in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , up_block_types=UpperCamelCase__ , block_out_channels=UpperCamelCase__ , layers_per_block=UpperCamelCase__ , norm_num_groups=UpperCamelCase__ , act_fn=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : List[Any] = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
SCREAMING_SNAKE_CASE : int = nn.Conv2d(latent_channels , latent_channels , 1 )
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : Tuple = False
# only relevant if vae tiling is enabled
SCREAMING_SNAKE_CASE : List[str] = self.config.sample_size
SCREAMING_SNAKE_CASE : Tuple = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
SCREAMING_SNAKE_CASE : List[Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
SCREAMING_SNAKE_CASE : Union[str, Any] = 0.25
def __A ( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int]=False ):
'''simple docstring'''
if isinstance(UpperCamelCase__ , (Encoder, Decoder) ):
SCREAMING_SNAKE_CASE : Optional[Any] = value
def __A ( self : List[Any] , UpperCamelCase__ : bool = True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = use_tiling
def __A ( self : int ):
'''simple docstring'''
self.enable_tiling(UpperCamelCase__ )
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = True
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = {}
def fn_recursive_add_processors(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Dict[str, AttentionProcessor] ):
if hasattr(UpperCamelCase__ , '''set_processor''' ):
SCREAMING_SNAKE_CASE : List[Any] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , UpperCamelCase__ , UpperCamelCase__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return processors
def __A ( self : Optional[int] , UpperCamelCase__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = len(self.attn_processors.keys() )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(UpperCamelCase__ )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : str ):
if hasattr(UpperCamelCase__ , '''set_processor''' ):
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
module.set_processor(UpperCamelCase__ )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , UpperCamelCase__ , UpperCamelCase__ )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Optional[Any] ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __A ( self : Optional[Any] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True ):
'''simple docstring'''
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(UpperCamelCase__ , return_dict=UpperCamelCase__ )
if self.use_slicing and x.shape[0] > 1:
SCREAMING_SNAKE_CASE : str = [self.encoder(UpperCamelCase__ ) for x_slice in x.split(1 )]
SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : int = self.encoder(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = self.quant_conv(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = DiagonalGaussianDistribution(UpperCamelCase__ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=UpperCamelCase__ )
def __A ( self : List[Any] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True ):
'''simple docstring'''
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(UpperCamelCase__ , return_dict=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = self.post_quant_conv(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.decoder(UpperCamelCase__ )
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase__ )
@apply_forward_hook
def __A ( self : Any , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True ):
'''simple docstring'''
if self.use_slicing and z.shape[0] > 1:
SCREAMING_SNAKE_CASE : Union[str, Any] = [self._decode(UpperCamelCase__ ).sample for z_slice in z.split(1 )]
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = self._decode(UpperCamelCase__ ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=UpperCamelCase__ )
def __A ( self : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = min(a.shape[2] , b.shape[2] , UpperCamelCase__ )
for y in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Tuple = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __A ( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = min(a.shape[3] , b.shape[3] , UpperCamelCase__ )
for x in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[str] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def __A ( self : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
SCREAMING_SNAKE_CASE : Any = int(self.tile_latent_min_size * self.tile_overlap_factor )
SCREAMING_SNAKE_CASE : Tuple = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
SCREAMING_SNAKE_CASE : List[Any] = []
for i in range(0 , x.shape[2] , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Dict = []
for j in range(0 , x.shape[3] , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.encoder(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = self.quant_conv(UpperCamelCase__ )
row.append(UpperCamelCase__ )
rows.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = []
for i, row in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[int] = []
for j, tile in enumerate(UpperCamelCase__ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
SCREAMING_SNAKE_CASE : str = self.blend_v(rows[i - 1][j] , UpperCamelCase__ , UpperCamelCase__ )
if j > 0:
SCREAMING_SNAKE_CASE : List[str] = self.blend_h(row[j - 1] , UpperCamelCase__ , UpperCamelCase__ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(UpperCamelCase__ , dim=3 ) )
SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(UpperCamelCase__ , dim=2 )
SCREAMING_SNAKE_CASE : List[Any] = DiagonalGaussianDistribution(UpperCamelCase__ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=UpperCamelCase__ )
def __A ( self : Optional[int] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
SCREAMING_SNAKE_CASE : Tuple = int(self.tile_sample_min_size * self.tile_overlap_factor )
SCREAMING_SNAKE_CASE : Any = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
SCREAMING_SNAKE_CASE : List[str] = []
for i in range(0 , z.shape[2] , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[str] = []
for j in range(0 , z.shape[3] , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Any = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
SCREAMING_SNAKE_CASE : Optional[Any] = self.post_quant_conv(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.decoder(UpperCamelCase__ )
row.append(UpperCamelCase__ )
rows.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = []
for i, row in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Any = []
for j, tile in enumerate(UpperCamelCase__ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
SCREAMING_SNAKE_CASE : Optional[int] = self.blend_v(rows[i - 1][j] , UpperCamelCase__ , UpperCamelCase__ )
if j > 0:
SCREAMING_SNAKE_CASE : List[str] = self.blend_h(row[j - 1] , UpperCamelCase__ , UpperCamelCase__ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(UpperCamelCase__ , dim=3 ) )
SCREAMING_SNAKE_CASE : str = torch.cat(UpperCamelCase__ , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase__ )
def __A ( self : str , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[torch.Generator] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = sample
SCREAMING_SNAKE_CASE : List[str] = self.encode(UpperCamelCase__ ).latent_dist
if sample_posterior:
SCREAMING_SNAKE_CASE : int = posterior.sample(generator=UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : int = posterior.mode()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.decode(UpperCamelCase__ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase__ )
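# A minimal usage sketch (an editorial addition, not part of the original file):
# it assumes this class behaves like the public diffusers AutoencoderKL it
# mirrors, and the checkpoint name is illustrative only.
if __name__ == "__main__":
    from diffusers import AutoencoderKL

    vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
    vae.enable_tiling()  # large inputs now route through tiled_encode / tiled_decode
    image = torch.randn(1, 3, 1024, 1024)
    with torch.no_grad():
        latents = vae.encode(image).latent_dist.sample()
        reconstruction = vae.decode(latents).sample
    print(reconstruction.shape)  # expected: torch.Size([1, 3, 1024, 1024])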
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_t5 import T5Tokenizer
else:
T5Tokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class T5TokenizerFast( PreTrainedTokenizerFast ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["""input_ids""", """attention_mask"""]
slow_tokenizer_class = T5Tokenizer
prefix_tokens = []
def __init__( self , vocab_file=None , tokenizer_file=None , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , **kwargs , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
additional_special_tokens = [f"""<extra_id_{i}>""" for i in range(extra_ids )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
extra_tokens = len(set(filter(lambda token : bool('''extra_id_''' in str(token ) ) , additional_special_tokens ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
SCREAMING_SNAKE_CASE : Any = vocab_file
self.can_save_slow_tokenizer = False if not vocab_file else True
self._extra_ids = extra_ids
@staticmethod
def _eventually_correct_t5_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
'''simple docstring'''
if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase__ , )
return max_model_length
def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(save_directory ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
logger.info(f"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
'''simple docstring'''
token_ids_0 = token_ids_0 + [self.eos_token_id]
if token_ids_1 is None:
return self.prefix_tokens + token_ids_0
else:
token_ids_1 = token_ids_1 + [self.eos_token_id]
return self.prefix_tokens + token_ids_0 + token_ids_1
def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
'''simple docstring'''
eos = [self.eos_token_id]
if token_ids_1 is None:
return len(token_ids_0 + eos ) * [0]
return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
def get_sentinel_tokens( self ):
'''simple docstring'''
return list(
set(filter(lambda token : bool(re.search(r'''<extra_id_\d+>''' , token ) ) is not None , self.additional_special_tokens ) ) )
def get_sentinel_token_ids( self ):
'''simple docstring'''
return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
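# A short usage sketch (an editorial addition): T5's extra_ids materialize as
# "<extra_id_0>" ... "<extra_id_99>" sentinel tokens, which the methods above
# expose; "t5-small" is an illustrative checkpoint.
if __name__ == "__main__":
    from transformers import T5TokenizerFast

    tok = T5TokenizerFast.from_pretrained("t5-small")
    print("<extra_id_0>" in tok.get_sentinel_tokens())  # True
    # build_inputs_with_special_tokens appends the </s> (eos) token to every sequence:
    ids = tok("translate English to German: hello")["input_ids"]
    print(ids[-1] == tok.eos_token_id)  # True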
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests( unittest.TestCase ):
model_mapping = MODEL_FOR_MASKED_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
def _UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[str] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
_UpperCAmelCase : Union[str, Any] = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
] , )
_UpperCAmelCase : Optional[int] = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCAmelCase : Optional[Any] = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
_UpperCAmelCase : List[str] = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase : str = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
_UpperCAmelCase : List[Any] = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
] , )
_UpperCAmelCase : Optional[Any] = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
_UpperCAmelCase : int = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(A_ , A_ )
@slow
@require_torch
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(A_ )
@slow
@require_tf
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[str] = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(A_ )
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
_UpperCAmelCase : int = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(A_ ) , [
{"sequence": "My name is John", "score": 0.0_08, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.0_07, "token": 1573, "token_str": " Chris"},
] , )
_UpperCAmelCase : Optional[int] = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(A_ ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.2_51,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.2_14,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCAmelCase : Any = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(A_ ) , [
{"sequence": "My name is Patrick", "score": 0.0_05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.0_00, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.0_00, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[str] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : List[Any] = None
self.run_pipeline_test(A_ , [] )
@require_tf
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Tuple = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : str = None
self.run_pipeline_test(A_ , [] )
def _UpperCAmelCase ( self , A_ , A_ , A_ ):
'''simple docstring'''
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
_UpperCAmelCase : Tuple = FillMaskPipeline(model=A_ , tokenizer=A_ )
_UpperCAmelCase : Tuple = [
f'This is another {tokenizer.mask_token} test',
]
return fill_masker, examples
def _UpperCAmelCase ( self , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : Dict = fill_masker.tokenizer
_UpperCAmelCase : Tuple = fill_masker.model
_UpperCAmelCase : Union[str, Any] = fill_masker(
f'This is a {tokenizer.mask_token}' , )
self.assertEqual(
A_ , [
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
] , )
_UpperCAmelCase : Optional[Any] = fill_masker([f'This is a {tokenizer.mask_token}'] )
self.assertEqual(
A_ , [
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
] , )
_UpperCAmelCase : List[str] = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'] )
self.assertEqual(
A_ , [
[
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
],
[
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
],
] , )
with self.assertRaises(A_ ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(A_ ):
fill_masker("This is" )
self.run_test_top_k(A_ , A_ )
self.run_test_targets(A_ , A_ )
self.run_test_top_k_targets(A_ , A_ )
self.fill_mask_with_duplicate_targets_and_top_k(A_ , A_ )
self.fill_mask_with_multiple_masks(A_ , A_ )
def _UpperCAmelCase ( self , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : Any = tokenizer.get_vocab()
_UpperCAmelCase : int = sorted(vocab.keys() )[:2]
# Pipeline argument
_UpperCAmelCase : Any = FillMaskPipeline(model=A_ , tokenizer=A_ , targets=A_ )
_UpperCAmelCase : Optional[int] = fill_masker(f'This is a {tokenizer.mask_token}' )
self.assertEqual(
A_ , [
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
] , )
_UpperCAmelCase : Tuple = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , A_ )
_UpperCAmelCase : List[Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(A_ ) )
# Call argument
_UpperCAmelCase : Union[str, Any] = FillMaskPipeline(model=A_ , tokenizer=A_ )
_UpperCAmelCase : List[Any] = fill_masker(f'This is a {tokenizer.mask_token}' , targets=A_ )
self.assertEqual(
A_ , [
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
] , )
_UpperCAmelCase : Optional[Any] = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , A_ )
_UpperCAmelCase : Any = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(A_ ) )
# Score equivalence
_UpperCAmelCase : int = fill_masker(f'This is a {tokenizer.mask_token}' , targets=A_ )
_UpperCAmelCase : Tuple = [top_mask["token_str"] for top_mask in outputs]
_UpperCAmelCase : Union[str, Any] = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(A_ ) == set(A_ ):
_UpperCAmelCase : List[str] = fill_masker(f'This is a {tokenizer.mask_token}' , targets=A_ )
_UpperCAmelCase : List[str] = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(A_ ) , nested_simplify(A_ ) )
# Raises with invalid
with self.assertRaises(A_ ):
_UpperCAmelCase : Optional[int] = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(A_ ):
_UpperCAmelCase : Dict = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""] )
with self.assertRaises(A_ ):
_UpperCAmelCase : List[str] = fill_masker(f'This is a {tokenizer.mask_token}' , targets="" )
def _UpperCAmelCase ( self , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = FillMaskPipeline(model=A_ , tokenizer=A_ , top_k=2 )
_UpperCAmelCase : Union[str, Any] = fill_masker(f'This is a {tokenizer.mask_token}' )
self.assertEqual(
A_ , [
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
] , )
_UpperCAmelCase : int = FillMaskPipeline(model=A_ , tokenizer=A_ )
_UpperCAmelCase : Optional[Any] = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 )
self.assertEqual(
A_ , [
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
] , )
self.assertEqual(nested_simplify(A_ ) , nested_simplify(A_ ) )
def _UpperCAmelCase ( self , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : int = tokenizer.get_vocab()
_UpperCAmelCase : Union[str, Any] = FillMaskPipeline(model=A_ , tokenizer=A_ )
# top_k=2, ntargets=3
_UpperCAmelCase : Tuple = sorted(vocab.keys() )[:3]
_UpperCAmelCase : Union[str, Any] = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=A_ )
# If we use the most probably targets, and filter differently, we should still
# have the same results
_UpperCAmelCase : Tuple = [el["token_str"] for el in sorted(A_ , key=lambda A_ : x["score"] , reverse=A_ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(A_ ).issubset(A_ ):
_UpperCAmelCase : int = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=A_ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(A_ ) , nested_simplify(A_ ) )
def _UpperCAmelCase ( self , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : Tuple = FillMaskPipeline(model=A_ , tokenizer=A_ )
_UpperCAmelCase : Dict = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCAmelCase : Dict = sorted(vocab.keys() )[:3]
_UpperCAmelCase : Tuple = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCAmelCase : Dict = fill_masker(f'My name is {tokenizer.mask_token}' , targets=A_ , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(A_ ) , 3 )
def _UpperCAmelCase ( self , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : int = FillMaskPipeline(model=A_ , tokenizer=A_ )
_UpperCAmelCase : List[str] = fill_masker(
f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2 )
self.assertEqual(
A_ , [
[
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
],
[
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
],
[
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
{"sequence": ANY(A_ ), "score": ANY(A_ ), "token": ANY(A_ ), "token_str": ANY(A_ )},
],
] , )
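# A standalone usage sketch of the pipeline exercised above (an editorial
# addition); the checkpoint is the same tiny test model used in this file.
if __name__ == "__main__":
    from transformers import pipeline

    unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
    for prediction in unmasker("My name is <mask>"):
        # each prediction carries "sequence", "score", "token" and "token_str"
        print(prediction["sequence"], round(prediction["score"], 6))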
from __future__ import annotations
import numpy as np
def relu( lowerCAmelCase: list[float] ) -> np.ndarray:
return np.maximum(0 , lowerCAmelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
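# Added for illustration (not in the original snippet): the subgradient used
# when backpropagating through ReLU, with the common convention of 0 at x = 0.
def relu_derivative(vector: list[float]) -> np.ndarray:
    return (np.asarray(vector) > 0).astype(np.float64)

if __name__ == "__main__":
    print(np.array(relu_derivative([-1, 0, 5])))  # --> [0. 0. 1.]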
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester( unittest.TestCase ):
def __init__( self: Any ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Union[str, Any]=7 ,__lowerCAmelCase: Tuple=3 ,__lowerCAmelCase: List[Any]=30 ,__lowerCAmelCase: Any=400 ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Optional[int]=None ,__lowerCAmelCase: Any=0.9 ,__lowerCAmelCase: Optional[int]=None ,__lowerCAmelCase: Any=True ,__lowerCAmelCase: Tuple=[0.5, 0.5, 0.5] ,__lowerCAmelCase: Union[str, Any]=[0.5, 0.5, 0.5] ,):
'''simple docstring'''
_lowerCamelCase : List[str] = size if size is not None else {"shortest_edge": 30}
_lowerCamelCase : List[Any] = crop_size if crop_size is not None else {"height": 30, "width": 30}
_lowerCamelCase : List[str] = parent
_lowerCamelCase : int = batch_size
_lowerCamelCase : Union[str, Any] = num_channels
_lowerCamelCase : Optional[int] = min_resolution
_lowerCamelCase : Dict = max_resolution
_lowerCamelCase : Dict = do_resize_and_center_crop
_lowerCamelCase : int = size
_lowerCamelCase : List[str] = crop_pct
_lowerCamelCase : Tuple = crop_size
_lowerCamelCase : List[str] = do_normalize
_lowerCamelCase : str = image_mean
_lowerCamelCase : Optional[Any] = image_std
def _lowercase ( self: List[str] ):
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : str = PoolFormerImageProcessingTester(self )
@property
def _lowercase ( self: Dict ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase ,"do_resize_and_center_crop" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"size" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"crop_pct" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"do_normalize" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"image_mean" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"image_std" ) )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"shortest_edge": 30} )
self.assertEqual(image_processor.crop_size ,{"height": 30, "width": 30} )
_lowerCamelCase : str = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size ,{"height": 84, "width": 84} )
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase ,Image.Image )
# Test not batched input
_lowerCamelCase : int = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
_lowerCamelCase : List[Any] = image_processing(__lowerCAmelCase ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowerCAmelCase ,numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase ,np.ndarray )
# Test not batched input
_lowerCamelCase : int = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
_lowerCamelCase : Tuple = image_processing(__lowerCAmelCase ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowerCAmelCase ,torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase ,torch.Tensor )
# Test not batched input
_lowerCamelCase : Optional[int] = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
_lowerCamelCase : Optional[Any] = image_processing(__lowerCAmelCase ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
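# A brief usage sketch of the processor under test (an editorial addition; the
# constructor arguments mirror the tester defaults above, not a checkpoint):
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    from transformers import PoolFormerImageProcessor

    processor = PoolFormerImageProcessor(size={"shortest_edge": 30}, crop_size={"height": 30, "width": 30})
    image = Image.fromarray(np.uint8(np.random.rand(64, 64, 3) * 255))
    pixel_values = processor(image, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # torch.Size([1, 3, 30, 30])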
import numpy as np
from transformers import Pipeline
def softmax( outputs ):
maxes = np.max(outputs , axis=-1 , keepdims=True )
shifted_exp = np.exp(outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
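# A quick worked check of the stabilized softmax above (editorial note): a naive
# exp-then-normalize of [1000., 1001.] overflows float64, but subtracting the row
# max first evaluates exp([-1., 0.]) and yields approximately [0.269, 0.731].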
class PairClassificationPipeline( Pipeline ):
def _sanitize_parameters(self , **kwargs ):
preprocess_kwargs = {}
if "second_text" in kwargs:
preprocess_kwargs['''second_text'''] = kwargs['''second_text''']
return preprocess_kwargs, {}, {}
def preprocess(self , text , second_text=None ):
return self.tokenizer(text , text_pair=second_text , return_tensors=self.framework )
def _forward(self , model_inputs ):
return self.model(**model_inputs )
def postprocess(self , model_outputs ):
logits = model_outputs.logits[0].numpy()
probabilities = softmax(logits )
best_class = np.argmax(probabilities )
label = self.model.config.id2label[best_class]
score = probabilities[best_class].item()
logits = logits.tolist()
return {"label": label, "score": score, "logits": logits}
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput( BaseOutput ):
"""simple docstring"""
sample: torch.FloatTensor
class Encoder( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase_ : Union[str, Any]=3 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Any=("DownEncoderBlock2D",) , lowerCamelCase_ : Dict=(64,) , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Optional[Any]=32 , lowerCamelCase_ : Optional[int]="silu" , lowerCamelCase_ : List[str]=True , ):
'''simple docstring'''
super().__init__()
_snake_case : Optional[int] = layers_per_block
_snake_case : str = torch.nn.Conv2d(
lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_snake_case : Optional[int] = None
_snake_case : Optional[int] = nn.ModuleList([] )
# down
_snake_case : Any = block_out_channels[0]
for i, down_block_type in enumerate(lowerCamelCase_ ):
_snake_case : List[Any] = output_channel
_snake_case : Tuple = block_out_channels[i]
_snake_case : List[str] = i == len(lowerCamelCase_ ) - 1
_snake_case : List[str] = get_down_block(
lowerCamelCase_ , num_layers=self.layers_per_block , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
self.down_blocks.append(lowerCamelCase_ )
# mid
_snake_case : Optional[int] = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# out
_snake_case : int = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6 )
_snake_case : Optional[int] = nn.SiLU()
_snake_case : List[Any] = 2 * out_channels if double_z else out_channels
_snake_case : int = nn.Conv2d(block_out_channels[-1] , lowerCamelCase_ , 3 , padding=1 )
_snake_case : str = False
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : str ):
'''simple docstring'''
_snake_case : str = x
_snake_case : Tuple = self.conv_in(lowerCamelCase_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_ : List[str] ):
def custom_forward(*lowerCamelCase_ : List[Any] ):
return module(*lowerCamelCase_ )
return custom_forward
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
_snake_case : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
# middle
_snake_case : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
for down_block in self.down_blocks:
_snake_case : List[str] = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ )
# middle
_snake_case : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase_ )
else:
# down
for down_block in self.down_blocks:
_snake_case : Tuple = down_block(lowerCamelCase_ )
# middle
_snake_case : List[str] = self.mid_block(lowerCamelCase_ )
# post-process
_snake_case : Optional[Any] = self.conv_norm_out(lowerCamelCase_ )
_snake_case : int = self.conv_act(lowerCamelCase_ )
_snake_case : Optional[Any] = self.conv_out(lowerCamelCase_ )
return sample
class Decoder( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , lowerCamelCase_ : str=3 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Dict=("UpDecoderBlock2D",) , lowerCamelCase_ : Optional[Any]=(64,) , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : List[Any]="silu" , lowerCamelCase_ : Optional[Any]="group" , ):
'''simple docstring'''
super().__init__()
_snake_case : Dict = layers_per_block
_snake_case : Tuple = nn.Conv2d(
lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_snake_case : str = None
_snake_case : List[Any] = nn.ModuleList([] )
_snake_case : str = in_channels if norm_type == 'spatial' else None
# mid
_snake_case : str = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# up
_snake_case : Tuple = list(reversed(lowerCamelCase_ ) )
_snake_case : Tuple = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase_ ):
_snake_case : int = output_channel
_snake_case : Any = reversed_block_out_channels[i]
_snake_case : Any = i == len(lowerCamelCase_ ) - 1
_snake_case : Tuple = get_up_block(
lowerCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , prev_output_channel=lowerCamelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , resnet_time_scale_shift=lowerCamelCase_ , )
self.up_blocks.append(lowerCamelCase_ )
_snake_case : Dict = output_channel
# out
if norm_type == "spatial":
_snake_case : Dict = SpatialNorm(block_out_channels[0] , lowerCamelCase_ )
else:
_snake_case : int = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6 )
_snake_case : Optional[Any] = nn.SiLU()
_snake_case : Optional[int] = nn.Conv2d(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1 )
_snake_case : List[Any] = False
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any]=None ):
'''simple docstring'''
_snake_case : Tuple = z
_snake_case : Any = self.conv_in(lowerCamelCase_ )
_snake_case : List[str] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_ : int ):
def custom_forward(*lowerCamelCase_ : Dict ):
return module(*lowerCamelCase_ )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
_snake_case : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
_snake_case : List[str] = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
_snake_case : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
# middle
_snake_case : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Optional[int] = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
_snake_case : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ )
else:
# middle
_snake_case : List[Any] = self.mid_block(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Any = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
_snake_case : str = up_block(lowerCamelCase_ , lowerCamelCase_ )
# post-process
if latent_embeds is None:
_snake_case : Tuple = self.conv_norm_out(lowerCamelCase_ )
else:
_snake_case : List[Any] = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Optional[Any] = self.conv_act(lowerCamelCase_ )
_snake_case : Union[str, Any] = self.conv_out(lowerCamelCase_ )
return sample
class VectorQuantizer( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : List[Any]="random" , lowerCamelCase_ : Union[str, Any]=False , lowerCamelCase_ : str=True ):
'''simple docstring'''
super().__init__()
_snake_case : Optional[int] = n_e
_snake_case : Dict = vq_embed_dim
_snake_case : int = beta
_snake_case : List[Any] = legacy
_snake_case : Optional[int] = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_snake_case : List[Any] = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
_snake_case : str = self.used.shape[0]
_snake_case : str = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_snake_case : Union[str, Any] = self.re_embed
_snake_case : int = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
_snake_case : List[Any] = n_e
_snake_case : List[str] = sane_index_shape
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : str ):
'''simple docstring'''
_snake_case : List[str] = inds.shape
assert len(lowerCamelCase_ ) > 1
_snake_case : Union[str, Any] = inds.reshape(ishape[0] , -1 )
_snake_case : List[str] = self.used.to(lowerCamelCase_ )
_snake_case : List[Any] = (inds[:, :, None] == used[None, None, ...]).long()
_snake_case : Union[str, Any] = match.argmax(-1 )
_snake_case : Optional[int] = match.sum(2 ) < 1
if self.unknown_index == "random":
_snake_case : str = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_snake_case : Tuple = self.unknown_index
return new.reshape(lowerCamelCase_ )
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Any ):
'''simple docstring'''
_snake_case : Dict = inds.shape
assert len(lowerCamelCase_ ) > 1
_snake_case : str = inds.reshape(ishape[0] , -1 )
_snake_case : Dict = self.used.to(lowerCamelCase_ )
if self.re_embed > self.used.shape[0]: # extra token
_snake_case : Tuple = 0 # simply set to zero
_snake_case : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_ )
return back.reshape(lowerCamelCase_ )
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
_snake_case : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous()
_snake_case : Tuple = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_snake_case : Optional[Any] = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight ) , dim=1 )
_snake_case : Any = self.embedding(lowerCamelCase_ ).view(z.shape )
_snake_case : Union[str, Any] = None
_snake_case : Dict = None
# compute loss for embedding
if not self.legacy:
_snake_case : List[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_snake_case : Optional[int] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_snake_case : Tuple = z + (z_q - z).detach()
# reshape back to match original input shape
_snake_case : Dict = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_snake_case : Tuple = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_snake_case : Union[str, Any] = self.remap_to_used(lowerCamelCase_ )
_snake_case : Union[str, Any] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_snake_case : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
if self.remap is not None:
_snake_case : Union[str, Any] = indices.reshape(shape[0] , -1 ) # add batch axis
_snake_case : Tuple = self.unmap_to_all(lowerCamelCase_ )
_snake_case : Optional[Any] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_snake_case : Any = self.embedding(lowerCamelCase_ )
if shape is not None:
_snake_case : Tuple = z_q.view(lowerCamelCase_ )
# reshape back to match original input shape
_snake_case : List[str] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class DiagonalGaussianDistribution( object ):
"""simple docstring"""
def __init__( self , parameters , deterministic=False ):
'''simple docstring'''
self.parameters = parameters
self.mean , self.logvar = torch.chunk(parameters , 2 , dim=1 )
self.logvar = torch.clamp(self.logvar , -30.0 , 20.0 )
self.deterministic = deterministic
self.std = torch.exp(0.5 * self.logvar )
self.var = torch.exp(self.logvar )
if self.deterministic:
self.var = self.std = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def sample( self , generator : Optional[torch.Generator] = None ) -> torch.FloatTensor:
'''simple docstring'''
sample = randn_tensor(
self.mean.shape , generator=generator , device=self.parameters.device , dtype=self.parameters.dtype )
x = self.mean + self.std * sample
return x
def kl( self , other=None ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def nll( self , sample , dims=[1, 2, 3] ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
logtwopi = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=dims )
def mode( self ):
'''simple docstring'''
return self.mean
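# A small illustration of the distribution above (an editorial addition): chunk a
# parameter tensor into mean/logvar along dim=1, sample with the reparameterization
# trick, and evaluate the closed-form KL against a standard normal.
if __name__ == "__main__":
    params = torch.randn(1, 8, 4, 4)  # 2 * latent_channels along dim=1
    dist = DiagonalGaussianDistribution(params)
    z = dist.sample()  # mean + std * eps, differentiable w.r.t. params
    print(z.shape)  # torch.Size([1, 4, 4, 4])
    print(dist.kl().shape)  # per-example KL term, torch.Size([1])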
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_canine'''] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
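# Note on the pattern above: at import time only the `_import_structure` dict is
# built; _LazyModule defers loading each submodule until one of its attributes is
# first accessed, which keeps importing the package cheap.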
| 652 | 1 |
'''Lucas-Lehmer primality test for Mersenne numbers M_p = 2**p - 1.'''
def lucas_lehmer_test(p: int) -> bool:
    """Return True iff the Mersenne number 2**p - 1 is prime (requires p >= 2)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True  # M_2 = 3 is prime
    s = 4
    m = (1 << p) - 1  # the Mersenne number under test
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
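# Worked example: lucas_lehmer_test(7) iterates s = (s * s - 2) % M_7 with
# M_7 = 2**7 - 1 = 127: 4 -> 14 -> 67 -> 42 -> 111 -> 0, so M_7 is prime (True).
# lucas_lehmer_test(11) ends nonzero because M_11 = 2047 = 23 * 89 (False).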
| 135 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = '▁'
class AlbertTokenizer(PreTrainedTokenizer):
    """Construct an ALBERT tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # the SentencePiece processor is not picklable, so it is reloaded in __setstate__
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize with SentencePiece, re-splitting digit/comma pieces such as `9,`."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
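# Minimal usage sketch (checkpoint name illustrative):
#   tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#   tokenizer.tokenize("lower newer")  # -> SentencePiece pieces prefixed with '▁'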
| 135 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
"tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
"BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 419 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
@slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        out = tokenizer(input_strings)
        self.assertIsNotNone(out)
@slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        out = tokenizer(input_strings)
        self.assertIsNotNone(out)
| 419 | 1 |
ROMAN = [
(1000, "M"),
(900, "CM"),
(500, "D"),
(400, "CD"),
(100, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def roman_to_int(roman: str) -> int:
    """
    Convert a Roman numeral to an integer.
    >>> roman_to_int("MCMXCIV")
    1994
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # a smaller value before a larger one means subtractive notation (IV, CM, ...)
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman(number: int) -> str:
    """
    Convert an integer to a Roman numeral.
    >>> int_to_roman(1994)
    'MCMXCIV'
    """
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
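# Round-trip example: int_to_roman(1994) == "MCMXCIV" (M + CM + XC + IV) and
# roman_to_int("MCMXCIV") == 1994.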
| 637 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
        input_ids = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 637 | 1 |
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )
A__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch
        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
def snake_case__ ( self ) -> Union[str, Any]:
# fmt: off
A__ = {"input_ids": [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
| 562 |
"""simple docstring"""
import numpy as np
def _lowerCamelCase ( UpperCAmelCase_ : np.array ) -> np.array:
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
def _lowerCamelCase ( UpperCAmelCase_ : np.array ) -> np.array:
"""simple docstring"""
return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
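# Worked values: sigmoid(0) == 0.5, and gaussian_error_linear_unit(1.0)
# == sigmoid(1.702) ~= 0.846, close to the exact GELU(1.0) ~= 0.841.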
| 562 | 1 |
"""simple docstring"""
class lowercase_ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ):
_A = None
_A = None
_A = graph
self._normalize_graph(_UpperCAmelCase , _UpperCAmelCase )
_A = len(_UpperCAmelCase )
_A = None
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict ):
if sources is int:
_A = [sources]
if sinks is int:
_A = [sinks]
if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) == 0:
return
_A = sources[0]
_A = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(_UpperCAmelCase ) > 1 or len(_UpperCAmelCase ) > 1:
_A = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
_A = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_A = max_input_flow
_A = 0
_A = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_A = max_input_flow
_A = size - 1
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.maximum_flow_algorithm is None:
raise Exception('You need to set maximum flow algorithm before.' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Union[str, Any] ):
_A = algorithm(self )
class lowercase_ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Union[str, Any] ):
_A = flow_network
_A = flow_network.verticesCount
_A = flow_network.sourceIndex
_A = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_A = flow_network.graph
_A = False
def lowerCAmelCase_ ( self : Optional[Any] ):
if not self.executed:
self._algorithm()
_A = True
def lowerCAmelCase_ ( self : int ):
pass
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : int , _UpperCAmelCase : Any ):
super().__init__(_UpperCAmelCase )
# use this to save your result
_A = -1
def lowerCAmelCase_ ( self : Optional[Any] ):
if not self.executed:
raise Exception('You should execute algorithm before using its result!' )
return self.maximum_flow
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , _UpperCAmelCase : List[Any] ):
super().__init__(_UpperCAmelCase )
_A = [[0] * self.verticies_count for i in range(self.verticies_count )]
_A = [0] * self.verticies_count
_A = [0] * self.verticies_count
def lowerCAmelCase_ ( self : Dict ):
_A = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_A = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_A = 0
while i < len(_UpperCAmelCase ):
_A = vertices_list[i]
_A = self.heights[vertex_index]
self.process_vertex(_UpperCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(_UpperCAmelCase ) )
_A = 0
else:
i += 1
_A = sum(self.preflow[self.source_index] )
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Any ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(_UpperCAmelCase , _UpperCAmelCase )
self.relabel(_UpperCAmelCase )
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple ):
_A = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : int ):
_A = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_A = self.heights[to_index]
if min_height is not None:
_A = min_height + 1
if __name__ == "__main__":
a = [0]
a = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
a = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
a = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
a = flow_network.find_maximum_flow()
print(F'''maximum flow is {maximum_flow}''')
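# Expected output for the 4-vertex network above: the only augmenting path is
# 0 -> 1 -> 2 -> 3 with capacities (7, 6, 8), so the script prints
# "maximum flow is 6".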
| 7 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 2_55,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        # pad up to the next multiple of `size` (note: this always adds at least one
        # full stride, even when a dimension is already a multiple of `size`)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
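# Example of the pad rule above (sizes assumed): a 510 x 339 image with pad_size=8
# is padded to 512 x 344. Note the formula always adds at least one full stride,
# so a dimension that is already a multiple of 8 still grows by 8.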
| 423 | 0 |
'''Topological sort of a DAG using Kahn's algorithm (BFS over indegree-0 vertices).'''
def topological_sort(graph):
    """Print a topological ordering of `graph`, or report that a cycle exists."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
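# For the adjacency list above this prints [0, 1, 2, 3, 4, 5]. Apart from the
# O(V) list.pop(0) calls (a collections.deque would make them O(1)), Kahn's
# algorithm runs in O(V + E).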
| 702 |
'''simple docstring'''
import unittest
from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
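# Each tester below pins a 9-value slice of a block's output to hard-coded
# reference numbers; test_output (inherited from UNetBlockTesterMixin) runs the
# block on a deterministic dummy input and compares against that slice, so any
# numerical drift in a block implementation fails fast.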
class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = 'down'

    def test_output(self):
        expected_slice = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
        super().test_output(expected_slice)
class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = 'down'

    def test_output(self):
        expected_slice = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
        super().test_output(expected_slice)
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = AttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
def _a (self ):
A_ : int = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = CrossAttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
def _a (self ):
A_, A_ : str = super().prepare_init_args_and_inputs_for_common()
A_ : Optional[Any] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : List[str] = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = SimpleCrossAttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : Union[str, Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _a (self ):
A_ : int = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = SkipDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_skip_sample=lowercase )
def _a (self ):
A_ : Any = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = AttnSkipDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_skip_sample=lowercase )
def _a (self ):
A_ : int = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = DownEncoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Any = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : int = {
"""in_channels""": 32,
"""out_channels""": 32,
}
A_ : Any = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Optional[Any] = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = AttnDownEncoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : Optional[Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
A_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Tuple = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = UNetMidBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'mid'
def _a (self ):
A_ : Optional[Any] = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
A_ : Any = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Optional[int] = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = UNetMidBlockaDCrossAttn # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'mid'
def _a (self ):
A_, A_ : Dict = super().prepare_init_args_and_inputs_for_common()
A_ : List[str] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : str = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : str = UNetMidBlockaDSimpleCrossAttn # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'mid'
@property
def _a (self ):
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Tuple = super().prepare_init_args_and_inputs_for_common()
A_ : Optional[int] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Any = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = UpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : str = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : Union[str, Any] = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ResnetUpsampleBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : Optional[Any] = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = CrossAttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Any = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : Union[str, Any] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Union[str, Any] = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = SimpleCrossAttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase , include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : int = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Any = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = AttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[str] = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _a (self ):
A_ : str = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = SkipUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : str = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = AttnSkipUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : str = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : int = UpDecoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : str = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : Tuple = {"""in_channels""": 32, """out_channels""": 32}
A_ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : str = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = AttnUpDecoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : List[Any] = {"""in_channels""": 32, """out_channels""": 32}
A_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
        expected_slice = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
        super().test_output(expected_slice)
| 686 | 0 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, '''dpr_tokenizer''')
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
        # BART tok
        vocab = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        bart_tokenizer_path = os.path.join(self.tmpdirname, '''bart_tokenizer''')
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''dpr_tokenizer'''))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''bart_tokenizer'''))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, '''rag_tokenizer''')
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
@slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained('''facebook/rag-token-nq''')
        input_strings = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
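

# Usage sketch (an addition for illustration): the defaults above describe the
# base AST encoder; the spectrogram shape is controlled by `max_length` (time
# frames) and `num_mel_bins` (frequency bins).
#
#     config = ASTConfig(max_length=1024, num_mel_bins=128)
#     assert config.patch_size == 16 and config.hidden_size == 768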
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1_024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
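

# Usage sketch (requires hub access; illustrative only, not part of the module):
#
#     tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#     batch = tokenizer(["Summarize: studies have shown ..."], return_tensors="pt")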
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
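
# Lazy-import behavior (an illustrative note, not executed here): with the
# `_LazyModule` registration above, a symbol such as `AltCLIPModel` is resolved
# on first attribute access instead of importing `modeling_altclip` eagerly.
# A hypothetical usage sketch:
#
#     from transformers import AltCLIPModel  # assumes torch is installed
#     model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")  # checkpoint id is illustrative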
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Compute (base ** exponent) % modulo_value by recursive squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    """Return the last `digits` digits of the hyperexponentiation of `base` by `height`."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
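

# Quick self-check (an addition, not part of the original solution): for
# exponents >= 1 with base below the modulus, `_modexpt` should agree with
# Python's built-in three-argument `pow`.
def _self_check_modexpt() -> None:
    for base, exponent, mod in [(2, 10, 1_000), (7, 13, 97), (1_777, 1_855, 10**8)]:
        assert _modexpt(base, exponent, mod) == pow(base, exponent, mod)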
if __name__ == "__main__":
print(f"""{solution() = }""")
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Return True if `item` occurs in the ascending-sorted list `a_list`."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
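

# Illustrative checks (an addition, not part of the original script): the input
# list must already be sorted in ascending order.
def _binary_search_examples() -> None:
    assert binary_search([1, 3, 5, 7, 9], 7) is True
    assert binary_search([1, 3, 5, 7, 9], 4) is False
    assert binary_search([], 1) is False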
if __name__ == "__main__":
__a : Tuple = input("Enter numbers separated by comma:\n").strip()
__a : Any = [int(item.strip()) for item in user_input.split(",")]
__a : List[Any] = int(input("Enter the number to be found in the list:\n").strip())
__a : Optional[int] = "" if binary_search(sequence, target) else "not "
print(f"""{target} was {not_str}found in {sequence}""")
"""simple docstring"""
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Run `check_program` in a subprocess and report whether it passed within `timeout` seconds."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
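

# Usage sketch (an illustration, not part of the original module): score one
# generated completion against its assertions; the program text is hypothetical.
def _demo_check_correctness():
    program = "def add(a, b):\n    return a + b\n\nassert add(1, 2) == 3\n"
    outcome = check_correctness(program, timeout=3.0, task_id="demo/0", completion_id=0)
    print(outcome["passed"])  # expected: True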
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
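

# Usage sketch (an addition; POSIX-only, since it relies on SIGALRM): a busy
# loop is interrupted after roughly 0.1 seconds.
def _demo_time_limit():
    try:
        with time_limit(0.1):
            while True:
                pass
    except TimeoutException:
        print("interrupted as expected")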
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield
@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """
    Disables destructive functions so generated code is less likely to interfere with the host
    (e.g. fork bombs, killing processes, deleting files). WARNING: this is not a security sandbox.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
"""Shortest remaining time first (SRTF) process scheduling with a pandas summary table."""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process using shortest remaining time first."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting and turnaround times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
print("Enter how many process you want to analyze")
__lowerCamelCase = int(input())
__lowerCamelCase = [0] * no_of_processes
__lowerCamelCase = [0] * no_of_processes
__lowerCamelCase = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("Enter the arrival time and burst time for process:--" + str(i + 1))
__lowerCamelCase , __lowerCamelCase = map(int, input().split())
__lowerCamelCase = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__lowerCamelCase = burst_time
__lowerCamelCase = no_of_processes
__lowerCamelCase = waiting_time
__lowerCamelCase = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
__lowerCamelCase = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"Process",
"BurstTime",
"ArrivalTime",
"WaitingTime",
"TurnAroundTime",
],
)
# Printing the dataFrame
pd.set_option("display.max_rows", fcfs.shape[0] + 1)
print(fcfs)
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()
def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: try every permutation of three elements."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Two-pointer scan over the sorted array."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
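

# Quick check (an addition): both strategies agree on a known triple,
# since 2 + 7 + 11 == 20.
def _triplet_example() -> None:
    assert triplet_sum1([2, 7, 11, 15, -1], 20) == (2, 7, 11)
    assert triplet_sum2([2, 7, 11, 15, -1], 20) == (2, 7, 11)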
def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(F"""The time for naive implementation is {times[0]}.""")
print(F"""The time for optimized implementation is {times[1]}.""")
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply `single_map_nested_func` over contiguous splits of `iterable`, via multiprocessing or joblib."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    """Configure the parallel backend used while the context is active."""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
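

# Sanity check (an addition): the contiguous-split arithmetic used by
# `_map_with_multiprocessing_pool` covers the whole input with balanced slices.
def _demo_splits(n_items=10, num_proc=3):
    spans = []
    for index in range(num_proc):
        div, mod = divmod(n_items, num_proc)
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        spans.append((start, end))
    return spans  # _demo_splits() == [(0, 4), (4, 7), (7, 10)]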
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask)
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True)["hidden_states"][0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True)["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2_048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, emoji_file, unk_token="<|endoftext|>", pad_token="<|endoftext|>", bos_token="<|startoftext|>", eos_token="<|endoftext|>", do_clean_text=False, **kwargs):
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, do_clean_text=do_clean_text, **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens into a single string."""
        out_string = "".join(tokens).strip()
        return out_string
def UpperCamelCase ( self : List[Any] , snake_case__ : "Conversation" ):
'''simple docstring'''
UpperCAmelCase__ : Dict = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(snake_case__ , add_special_tokens=snake_case__ ) + [self.eos_token_id] )
if len(snake_case__ ) > self.model_max_length:
UpperCAmelCase__ : Tuple = input_ids[-self.model_max_length :]
return input_ids
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
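

# Usage sketch (file paths are hypothetical; a real vocab.txt/emoji.json pair is
# required to run this):
#
#     tokenizer = GPTNeoXJapaneseTokenizer(vocab_file="vocab.txt", emoji_file="emoji.json")
#     ids = tokenizer("こんにちは、世界")["input_ids"]
#     text = tokenizer.decode(ids)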
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    """Return True if `string` can be segmented into a sequence of words from `words`."""
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}

            trie_node = trie_node[c]

        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
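

# Usage sketch (an addition): "applepenapple" splits as "apple pen apple",
# while "catsandog" has no full segmentation over its dictionary.
def _word_break_examples() -> None:
    assert word_break("applepenapple", ["apple", "pen"]) is True
    assert word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False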
if __name__ == "__main__":
import doctest
    doctest.testmod()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class __SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
def __UpperCAmelCase ( *SCREAMING_SNAKE_CASE__: Any, **SCREAMING_SNAKE_CASE__: Optional[Any] ) -> str:
"""simple docstring"""
requires_backends(UpperCamelCase__, ['torch'] )
def __UpperCAmelCase ( *SCREAMING_SNAKE_CASE__: str, **SCREAMING_SNAKE_CASE__: Any ) -> Optional[int]:
"""simple docstring"""
requires_backends(UpperCamelCase__, ['torch'] )
def __UpperCAmelCase ( *SCREAMING_SNAKE_CASE__: Optional[int], **SCREAMING_SNAKE_CASE__: Dict ) -> Optional[int]:
"""simple docstring"""
requires_backends(UpperCamelCase__, ['torch'] )
def __UpperCAmelCase ( *SCREAMING_SNAKE_CASE__: List[Any], **SCREAMING_SNAKE_CASE__: int ) -> List[Any]:
"""simple docstring"""
requires_backends(UpperCamelCase__, ['torch'] )
def __UpperCAmelCase ( *SCREAMING_SNAKE_CASE__: Union[str, Any], **SCREAMING_SNAKE_CASE__: str ) -> List[Any]:
"""simple docstring"""
requires_backends(UpperCamelCase__, ['torch'] )
def __UpperCAmelCase ( *SCREAMING_SNAKE_CASE__: Optional[Any], **SCREAMING_SNAKE_CASE__: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(UpperCamelCase__, ['torch'] )
def __UpperCAmelCase ( *SCREAMING_SNAKE_CASE__: str, **SCREAMING_SNAKE_CASE__: Optional[int] ) -> List[Any]:
"""simple docstring"""
requires_backends(UpperCamelCase__, ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Any:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Any:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Tuple:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Dict:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Dict:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->List[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Any:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Dict:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->List[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->List[str]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Any:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->List[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->int:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->List[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->List[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->List[str]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->str:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->str:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->List[str]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Tuple:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Dict:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->str:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Any:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->int:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Tuple:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->int:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Tuple:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Any:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->List[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCAmelCase_ ):
__a =["torch"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) ->Any:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def __UpperCamelCase ( cls , *lowerCamelCase , **lowerCamelCase ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] ) | 448 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe) -> None:
    """Run one worker of the odd-even transposition sort; each worker holds a single value."""
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr: list) -> list:
    """Sort `arr` by spawning one process per element; each process holds one value."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_ls = temp_rs
    temp_lr = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_ls = temp_rs
        temp_lr = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
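# For comparison, a minimal single-process sketch of the same odd-even
# transposition scheme (my addition, not part of the original parallel
# implementation): n phases of alternating even/odd compare-and-swap passes.
def odd_even_transposition_sequential(arr: list) -> list:
    for phase in range(len(arr)):
        start = phase % 2  # even phases pair (0,1),(2,3)...; odd phases pair (1,2),(3,4)...
        for j in range(start, len(arr) - 1, 2):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr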
def main() -> None:
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
| 690 | 0 |
'''simple docstring'''
from math import sqrt
def is_prime(number: int) -> bool:
    """Check primality with trial division using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F'''{solution() = }''')
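    # Quick sanity checks (my addition, not from the original): the first and
    # sixth primes are 2 and 13, which exercises both search loops above.
    assert solution(1) == 2
    assert solution(6) == 13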
| 513 |
'''simple docstring'''
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str) -> None:
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
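# Example invocation (hypothetical file names, shown for illustration only):
#   python conversion_ldm_uncond.py \
#       --checkpoint_path ./ldm.ckpt --config_path ./ldm_config.yaml --output_path ./ldm-pipeline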
| 513 | 1 |
def lucas_lehmer_test(p: int) -> bool:
    """Return True if the Mersenne number 2**p - 1 is prime (p itself must be prime)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
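    # A short scan (my addition): for prime exponents up to 31, the Mersenne
    # primes 2**p - 1 correspond to p in {2, 3, 5, 7, 13, 17, 19, 31}.
    print([p for p in (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31) if lucas_lehmer_test(p)])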
| 154 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        """Turn raw audio into Whisper input features."""
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        """Generate token ids from the input features."""
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        """Convert generated token ids back to text."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
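# Hedged usage sketch (my addition; assumes the transformers agents tool API,
# where PipelineTool instances are callable and download weights lazily):
#   tool = SpeechToTextTool()
#   text = tool(audio)  # `audio` is a raw waveform array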
| 154 | 1 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Compute a log-mel spectrogram, clipped and rescaled to roughly [-1, 1]."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one or more raw waveforms into padded log-mel patches plus an attention mask."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
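# Minimal usage sketch (my addition; shapes are illustrative):
#   extractor = TvltFeatureExtractor()
#   audio = np.random.rand(44100).astype(np.float32)  # ~1 s of mono audio
#   inputs = extractor(audio, sampling_rate=44100, return_tensors="np")
#   inputs["audio_values"].shape  # (batch, 1, padded_time, feature_size)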
| 34 | import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
@slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)
@slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)
@slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)
@slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)
@slow
    def test_run_ner(self):
        # with so little data, distributed training needs more epochs to reach the same score
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)
@slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
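# To run one of these example tests locally (my note; the path follows the
# usual transformers layout and may differ):
#   RUN_SLOW=1 python -m pytest examples/flax/test_flax_examples.py -k test_run_glue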
| 34 | 1 |
def hamming_distance(string1: str, string2: str) -> int:
    """Count the positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
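# A related sketch (my addition): for integers, the Hamming distance is the
# popcount of the XOR, which avoids the per-character loop entirely.
def hamming_distance_int(a: int, b: int) -> int:
    return bin(a ^ b).count("1")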
if __name__ == "__main__":
import doctest
doctest.testmod()
| 47 | import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
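# Example invocation (hypothetical file names, for illustration only):
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path ./mlm_en_2048.pth --pytorch_dump_folder_path ./xlm-converted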
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path) | 534 | 0 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
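# To exercise just the slow pretrained-weights check (my note; the test path
# follows the usual transformers layout and may differ):
#   RUN_SLOW=1 pytest tests/models/roberta/test_modeling_flax_roberta.py -k test_model_from_pretrained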
 | 709 | deps = {
'''Pillow''': '''Pillow<10.0.0''',
'''accelerate''': '''accelerate>=0.20.3''',
'''av''': '''av==9.2.0''',
'''beautifulsoup4''': '''beautifulsoup4''',
'''black''': '''black~=23.1''',
'''codecarbon''': '''codecarbon==1.2.0''',
'''cookiecutter''': '''cookiecutter==1.7.3''',
'''dataclasses''': '''dataclasses''',
'''datasets''': '''datasets!=2.5.0''',
'''decord''': '''decord==0.6.0''',
'''deepspeed''': '''deepspeed>=0.9.3''',
'''diffusers''': '''diffusers''',
'''dill''': '''dill<0.3.5''',
'''evaluate''': '''evaluate>=0.2.0''',
'''fairscale''': '''fairscale>0.3''',
'''faiss-cpu''': '''faiss-cpu''',
'''fastapi''': '''fastapi''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1,<=0.7.0''',
'''ftfy''': '''ftfy''',
'''fugashi''': '''fugashi>=1.0''',
'''GitPython''': '''GitPython<3.1.19''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''ipadic''': '''ipadic>=1.0.0,<2.0''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''',
'''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''',
'''jieba''': '''jieba''',
'''kenlm''': '''kenlm''',
'''keras-nlp''': '''keras-nlp>=0.3.1''',
'''librosa''': '''librosa''',
'''nltk''': '''nltk''',
'''natten''': '''natten>=0.14.6''',
'''numpy''': '''numpy>=1.17''',
'''onnxconverter-common''': '''onnxconverter-common''',
'''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''',
'''onnxruntime''': '''onnxruntime>=1.4.0''',
'''opencv-python''': '''opencv-python''',
'''optuna''': '''optuna''',
'''optax''': '''optax>=0.0.8,<=0.1.4''',
'''packaging''': '''packaging>=20.0''',
'''parameterized''': '''parameterized''',
'''phonemizer''': '''phonemizer''',
'''protobuf''': '''protobuf''',
'''psutil''': '''psutil''',
'''pyyaml''': '''pyyaml>=5.1''',
'''pydantic''': '''pydantic<2''',
'''pytest''': '''pytest>=7.2.0''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''python''': '''python>=3.8.0''',
'''ray[tune]''': '''ray[tune]''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''',
'''rjieba''': '''rjieba''',
'''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''',
'''ruff''': '''ruff>=0.0.241,<=0.0.259''',
'''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''',
'''sacremoses''': '''sacremoses''',
'''safetensors''': '''safetensors>=0.3.1''',
'''sagemaker''': '''sagemaker>=2.31.0''',
'''scikit-learn''': '''scikit-learn''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''sigopt''': '''sigopt''',
'''starlette''': '''starlette''',
'''sudachipy''': '''sudachipy>=0.6.6''',
'''sudachidict_core''': '''sudachidict_core>=20220729''',
'''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''',
'''tensorflow''': '''tensorflow>=2.6,<2.14''',
'''tensorflow-text''': '''tensorflow-text<2.14''',
'''tf2onnx''': '''tf2onnx''',
'''timeout-decorator''': '''timeout-decorator''',
'''timm''': '''timm''',
'''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''',
'''torch''': '''torch>=1.9,!=1.12.0''',
'''torchaudio''': '''torchaudio''',
'''torchvision''': '''torchvision''',
'''pyctcdecode''': '''pyctcdecode>=0.4.0''',
'''tqdm''': '''tqdm>=4.27''',
'''unidic''': '''unidic>=1.0.2''',
'''unidic_lite''': '''unidic_lite>=1.0.7''',
'''urllib3''': '''urllib3<2.0.0''',
'''uvicorn''': '''uvicorn''',
}
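# Sketch of how a pin table like this is typically consumed (an assumption
# modeled on transformers' setup.py, not necessarily this file's siblings):
def deps_list(*pkgs):
    """Map bare package names to their pinned requirement strings."""
    return [deps[pkg] for pkg in pkgs]


# e.g. deps_list("torch", "numpy") -> ["torch>=1.9,!=1.12.0", "numpy>=1.17"]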
| 469 | 0 |
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = "\nimport os\n"
IMPORT_IN_FUNCTION = "\ndef foo():\n    import os\n    return False\n"
DEEPLY_NESTED_IMPORT = "\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n"
TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"
TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n"
MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n"
EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n"
GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n"
MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n"
MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n"
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
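# Quick interactive sketch (my addition; mirrors the parametrized test, the
# temp path is arbitrary):
#   with open("/tmp/demo.py", "w") as f:
#       f.write(TOP_LEVEL_TRY_IMPORT)
#   get_imports("/tmp/demo.py")  # expected: ["os"], since try/except imports are treated as optional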
| 636 |
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config we are going to pretrain.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch
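# --- Illustrative toy version (assumption: a simplified stand-in, not the real
# _compute_mask_indices): sample SpecAugment-style spans of `mask_length`
# consecutive frames until roughly `mask_prob` of the sequence is masked.
def toy_mask_indices(batch_size, seq_len, mask_prob=0.65, mask_length=10, seed=0):
    import numpy as np

    rng = np.random.default_rng(seed)
    num_spans = max(2, int(mask_prob * seq_len / mask_length))  # min_masks=2, as above
    mask = np.zeros((batch_size, seq_len), dtype=bool)
    for b in range(batch_size):
        starts = rng.choice(seq_len - mask_length, size=num_spans, replace=False)
        for s in starts:
            mask[b, s : s + mask_length] = True
    return mask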
class Wav2Vec2PreTrainer(Trainer):
    """
    Subclassed Trainer that decays the gumbel softmax temperature during training.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
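# --- Worked restatement of the temperature schedule implemented above
# (defaults mirror the ModelArguments defaults; geometric decay with a floor):
def gumbel_temperature(step, max_temp=2.0, min_temp=0.5, decay=0.999995):
    # e.g. gumbel_temperature(0) -> 2.0; the value approaches 0.5 for large step
    return max(max_temp * decay**step, min_temp)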
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
| 636 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEquals(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
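# --- Standalone restatement (helper name introduced here for illustration):
# the zero-mean/unit-variance property the checks above assert, with the same
# 1e-3 tolerance used there.
def is_zero_mean_unit_variance(feats, tol=1e-3):
    feats = np.asarray(feats)
    return bool(np.all(np.abs(feats.mean(axis=0)) < tol) and np.all(np.abs(feats.var(axis=0) - 1) < tol))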
 | 709 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
"SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
"SEWForCTC",
"SEWForSequenceClassification",
"SEWModel",
"SEWPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
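# --- Minimal sketch of the lazy-import idea used above (illustrative; the
# helper `lazy_attr` is introduced here and is not the real _LazyModule):
def lazy_attr(package: str, submodule: str, attr: str):
    import importlib

    # the submodule is imported only when the attribute is first requested
    return getattr(importlib.import_module(f"{package}.{submodule}"), attr)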
| 104 | 0 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: "Image.Image") -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
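# --- Usage sketch for hashimage (demo function introduced here; not part of
# the original test file): hashing raw pixel bytes gives a deterministic
# fingerprint, handy for image regression checks. Requires PIL to be installed.
def _hashimage_demo():
    img = Image.new("RGB", (8, 8), color="red")
    return hashimage(img)  # same image bytes -> same md5 hex digest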
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )
    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
 | 25 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
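# --- Background sketch (assumption: this is the generic Karras et al. (2022)
# rho-form sigma schedule; the exact schedule inside KarrasVeScheduler, which
# the tests below exercise, may differ in form):
def karras_sigmas(n, sigma_min=0.002, sigma_max=80.0, rho=7.0):
    ramp = np.linspace(0, 1, n)
    return (sigma_max ** (1 / rho) + ramp * (sigma_min ** (1 / rho) - sigma_max ** (1 / rho))) ** rho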
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_loop(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
 | 25 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """
    Output of VQModel encoding method.
    """

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
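# --- Minimal sketch of the vector-quantization step VectorQuantizer performs
# (illustrative only; `nearest_code` is introduced here): nearest-codebook
# lookup with a straight-through gradient estimator.
def nearest_code(z, codebook):
    # z: (N, D) latents, codebook: (K, D) embeddings -> quantized latents, indices
    distances = torch.cdist(z, codebook)
    indices = distances.argmin(dim=1)
    z_q = codebook[indices]
    return z + (z_q - z).detach(), indices  # gradients flow straight through to z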
| 125 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50_257,
        max_position_embeddings=2_048,
        hidden_size=2_048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
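# --- Worked example for the expansion above (demo function introduced here,
# not part of the original file):
def _attention_types_demo():
    # [[["global", "local"], 2]] -> alternating per-layer pattern, repeated twice
    pattern = GPTNeoConfig.expand_attention_types_params([[["global", "local"], 2]])
    assert pattern == ["global", "local", "global", "local"]
    return pattern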
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
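# --- Quick shape check for custom_unfold (demo introduced here; expected to
# mirror torch.Tensor.unfold on this input):
def _custom_unfold_demo():
    import torch

    x = torch.arange(10).unsqueeze(0)  # shape (1, 10)
    out = custom_unfold(x, dimension=1, size=4, step=2)
    return out.shape  # windows of size 4 with stride 2 -> torch.Size([1, 4, 4])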
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """
    Custom implementation to enable the export to ONNX: finds the largest divisor
    of `seq_length` that is smaller than `window_size`, using tensor ops only.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 125 | 1 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polynomial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Polynomial Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
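# --- Equivalent pipeline-style sketch (assumption: the same degree-4 fit
# expressed with sklearn's make_pipeline; `model` is introduced here only for
# illustration):
from sklearn.pipeline import make_pipeline

model = make_pipeline(PolynomialFeatures(degree=4), LinearRegression())
model.fit(X, y)
# model.predict([[5.5]]) should agree with pol_reg.predict(...) above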
| 21 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image,
            "anime turtle",
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
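# --- Helper sketch mirroring the memory assertion above (helper name is
# introduced here for illustration; requires a CUDA device):
def _assert_vram_under(limit_gb: float):
    mem_bytes = torch.cuda.max_memory_allocated()
    assert mem_bytes < limit_gb * 10**9, f"{mem_bytes} bytes exceeds {limit_gb} GB"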
| 539 | 0 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
 | 308 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=267_735,
        cutoffs=[20_000, 40_000, 200_000],
        d_model=1_024,
        d_embed=1_024,
        n_head=16,
        d_head=64,
        d_inner=4_096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1_600,
        clamp_len=1_000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
 | 308 | 1 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False

    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                f" {self.sequence_state_dim} and {self.sequence_state_dim}."
            )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_state_dim}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
    """Returns the default ESM-2 vocabulary assumed for folding models."""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
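
# A minimal usage sketch for the dataclasses above (with the names as
# reconstructed here; not an official transformers example). TrunkConfig
# validates its dimensions on construction, and to_dict() expands the
# nested configs, while dict inputs are promoted back to config objects.
trunk = TrunkConfig(sequence_state_dim=1024, sequence_head_width=32)
fold_cfg = EsmFoldConfig(trunk=trunk.to_dict())  # dict is promoted back to TrunkConfig
assert isinstance(fold_cfg.trunk, TrunkConfig)
assert fold_cfg.to_dict()["trunk"]["num_blocks"] == 48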
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : int =["""torch"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
def _lowerCamelCase(*args, **kwargs):
    # Module-level placeholder for a torch-backed function; same pattern as above.
    requires_backends(_lowerCamelCase, ["torch"])
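
# Behavior sketch (illustrative only): when torch is missing,
# requires_backends raises an ImportError naming the backend; when torch is
# installed, the real objects are imported instead and these placeholders
# are never reached.
try:
    _UpperCamelCase()
except ImportError as err:
    print(err)  # explains that this object requires the "torch" backend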
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple

import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask

import transformers
from transformers import (
    AutoConfig,
    AutoModelForTokenClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
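
# Quick sketch of how HfArgumentParser consumes the dataclasses above. The
# model name and paths are illustrative placeholders, not values from this
# script; call _sketch_parse_args() to try it.
def _sketch_parse_args():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses(
        args=["--model_name_or_path", "bert-base-cased", "--data_dir", "./data", "--output_dir", "./out"]
    )
    return model_args, data_args, training_args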
def main():
    # See all possible arguments by passing --help to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[List[str]], List[List[str]]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
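
# Standalone sketch of the label-alignment step used in compute_metrics above,
# assuming the usual convention that positions labeled with CrossEntropyLoss's
# ignore index (-100) are padding/subword positions and must be dropped before
# scoring. The arrays and label names are illustrative.
def _sketch_align_predictions():
    logits = np.array([[[0.1, 2.0], [1.5, 0.2], [0.0, 0.0]]])  # (batch=1, seq=3, num_labels=2)
    gold = np.array([[1, 0, -100]])  # -100 marks an ignored position
    id2label = {0: "O", 1: "B-PER"}

    pred_ids = np.argmax(logits, axis=2)
    return [
        [id2label[p] for p, g in zip(row_p, row_g) if g != -100]
        for row_p, row_g in zip(pred_ids, gold)
    ]  # -> [["B-PER", "O"]]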
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    """Syllable-level BARTpho tokenizer, based on SentencePiece with a reduced monolingual vocabulary on top."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Builds model inputs as <s> A </s> for one sequence, or <s> A </s></s> B </s> for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """BARTpho does not use token type ids, so a list of zeros is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the reduced fairseq vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the reduced fairseq vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of (sub-word) tokens into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
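
# Usage sketch for the tokenizer above, loading the checkpoint referenced in
# the pretrained maps. Requires network access and the sentencepiece package;
# the Vietnamese example sentence is illustrative.
if __name__ == "__main__":
    tok = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
    enc = tok("Chúng tôi là những nghiên cứu viên.")
    print(enc.input_ids)
    print(tok.convert_ids_to_tokens(enc.input_ids))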
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
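
# Quick sketch: build the standalone parser and parse a hypothetical config
# path (the yaml filename is an illustrative placeholder).
def _sketch_parser_usage():
    parser = test_command_parser()
    args = parser.parse_args(["--config_file", "my_config.yaml"])
    return args.config_file  # "my_config.yaml"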
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
from typing import List

from .keymap import KEYMAP, get_character


def mark(key: str):
    """Marks the function with the key code so KeyHandler can route input to it."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """Same as `mark`, but for multiple keys at once."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator
class KeyHandler(type):
    """Metaclass that collects the `handle_key` markers into a dispatch table."""

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and calls the handler registered for the pressed character, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register(cls):
    """Adds the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
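
# Sketch of the decorator contract above (key and function are illustrative):
# @mark records the key on the function, which KeyHandler later collects into
# its dispatch table.
@mark("j")
def _move_down(cls):
    return "down"

assert _move_down.handle_key == ["j"]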
import argparse
import os
import re


PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    """Returns the indent of a given line."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def _UpperCAmelCase ( a : List[Any] , a : str="" , a : Dict=None , a : Optional[Any]=None ):
snake_case__ = 0
snake_case__ = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(a__ ):
index += 1
snake_case__ = ["""\n""".join(lines[:index] )]
else:
snake_case__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
snake_case__ = [lines[index]]
index += 1
while index < len(a__ ) and (end_prompt is None or not lines[index].startswith(a__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(a__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(a__ ) )
if index < len(a__ ) - 1:
snake_case__ = [lines[index + 1]]
index += 1
else:
snake_case__ = []
else:
blocks.append("""\n""".join(a__ ) )
snake_case__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(a__ ) > 0:
blocks.append("""\n""".join(a__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(a__ ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    """Wraps a `key` function so that case and underscores are ignored when sorting."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sorts a list of `objects` following isort rules: constants, then classes, then functions."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement: str) -> str:
    """Sorts the imports inside a single `_import_structure` statement."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
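
# Worked example of sort_objects_in_import on a one-line entry (module and
# object names are illustrative, not from the diffusers init): constants sort
# first, then classes, then functions.
_example = '    "models": ["ModelB", "CONSTANT_A", "helper_c"],'
assert sort_objects_in_import(_example) == '    "models": ["CONSTANT_A", "ModelB", "helper_c"],'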
def sort_imports(file: str, check_only: bool = True):
    """Sorts the `_import_structure` entries of one init file (or just checks, if `check_only=True`)."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block_sorted = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block_sorted)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Runs `sort_imports` on every `__init__.py` under the diffusers source tree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
from __future__ import annotations


class Node:
    """A binary tree node holding an integer."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:
    """In-order traversal of the tree."""
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    """Recursive depth: one more than the deeper of the two subtrees."""
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node | None) -> bool:
    """Returns True if every node has either zero or two children."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right
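
# Tiny check of the helpers above: a root with two leaves is a full binary
# tree of depth 2 (the node values are illustrative).
_root = Node(10)
_root.left, _root.right = Node(20), Node(30)
assert is_full_binary_tree(_root)
assert depth_of_tree(_root) == 2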
def main() -> None:  # Main function for testing.
    # Build a small example tree. The original attachment structure was lost
    # in this copy, so the shape below is one plausible reconstruction.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def _A ( __lowercase = 200_0000 ):
"""simple docstring"""
lowerCamelCase__ = [0]
lowerCamelCase__ = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
lowerCamelCase__ = 0
# the area corresponding to the grid that gives the product closest to target
lowerCamelCase__ = 0
# an estimate of b, using the quadratic formula
lowerCamelCase__ = 42
# the largest integer less than b_estimate
lowerCamelCase__ = 42
# the largest integer less than b_estimate
lowerCamelCase__ = 42
# the triangle number corresponding to b_floor
lowerCamelCase__ = 42
# the triangle number corresponding to b_ceil
lowerCamelCase__ = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
lowerCamelCase__ = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
lowerCamelCase__ = floor(__lowercase )
lowerCamelCase__ = ceil(__lowercase )
lowerCamelCase__ = triangle_numbers[b_floor]
lowerCamelCase__ = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
lowerCamelCase__ = triangle_b_first_guess * triangle_a
lowerCamelCase__ = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
lowerCamelCase__ = triangle_b_second_guess * triangle_a
lowerCamelCase__ = idx_a * b_ceil
return area
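
# Sanity check of the identity the search relies on: an a x b grid contains
# T(a) * T(b) axis-aligned rectangles, where T(n) = n * (n + 1) / 2. The
# problem statement notes that a 2 x 3 grid contains 18 rectangles.
def _triangle(n: int) -> int:
    return n * (n + 1) // 2


assert _triangle(2) * _triangle(3) == 18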
if __name__ == "__main__":
print(F'{solution() = }')
"""simple docstring"""
def _A ( __lowercase , __lowercase ):
"""simple docstring"""
while second != 0:
lowerCamelCase__ = first & second
first ^= second
lowerCamelCase__ = c << 1
return first
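
# Step-by-step trace of add(5, 9) under the loop above:
# 5 ^ 9 = 12 (sum ignoring carries), (5 & 9) << 1 = 2 (carries shifted left);
# then 12 ^ 2 = 14 and (12 & 2) << 1 = 0, so the loop ends and returns 14.
assert add(5, 9) == 14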
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = int(input("""Enter the first number: """).strip())
__magic_name__ = int(input("""Enter the second number: """).strip())
print(F'{add(first, second) = }')
"""Convert original BLIP-2 checkpoints (from LAVIS) to the Hugging Face format."""
import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image

from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))

    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
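
# rename_key in action on a toy state dict (the keys here are illustrative):
_toy = {"visual_encoder.cls_token": 1}
rename_key(_toy, "visual_encoder.cls_token", "vision_model.embeddings.class_embedding")
assert _toy == {"vision_model.embeddings.class_embedding": 1}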
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set the fused qkv bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias
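
# Shape sketch of the fused bias above: the EVA-ViT checkpoint stores separate
# q and v biases and no k bias, so the fused qkv bias is [q, zeros, v]
# (toy sizes, not the real hidden dim):
_q, _v = torch.ones(4), torch.full((4,), 2.0)
assert torch.cat((_q, torch.zeros_like(_v), _v)).shape == (12,)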
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=False ):
lowercase__ : Any = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
lowercase__ : str = tokenizer('''\n''' , add_special_tokens=__UpperCAmelCase ).input_ids[0]
lowercase__ , lowercase__ : Any = get_blipa_config(__UpperCAmelCase , eos_token_id=__UpperCAmelCase )
lowercase__ : List[str] = BlipaForConditionalGeneration(__UpperCAmelCase ).eval()
lowercase__ : List[str] = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
    name , type = model_name_to_original[model_name]
    # load original model
    print('''Loading original model...''' )
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    original_model , vis_processors , _ = load_model_and_preprocess(
        name=name , model_type=type , is_eval=True , device=device )
original_model.eval()
print('''Done!''' )
# update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith('''Qformer.bert''' ):
            key = key.replace('''Qformer.bert''' , '''qformer''' )
        if "attention.self" in key:
            key = key.replace('''self''' , '''attention''' )
        if "opt_proj" in key:
            key = key.replace('''opt_proj''' , '''language_projection''' )
        if "t5_proj" in key:
            key = key.replace('''t5_proj''' , '''language_projection''' )
        if key.startswith('''opt''' ):
            key = key.replace('''opt''' , '''language''' )
        if key.startswith('''t5''' ):
            key = key.replace('''t5''' , '''language''' )
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict , config )
    missing_keys , unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors['''eval'''](image ).unsqueeze(0 ).to(device )
    input_ids = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(device )
    # create processor (OPENAI_CLIP_MEAN / OPENAI_CLIP_STD come from the imports at the top of the script)
    image_processor = BlipImageProcessor(
        size={'''height''': image_size, '''width''': image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
    processor = BlipaProcessor(image_processor=image_processor , tokenizer=tokenizer )
    pixel_values = processor(images=image , return_tensors='''pt''' ).pixel_values.to(device )
    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values , original_pixel_values )
    original_model.to(device )
    hf_model.to(device )
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
            logits = hf_model(pixel_values , input_ids ).logits
        else:
            original_logits = original_model(
                {'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
            logits = hf_model(pixel_values , input_ids , labels=labels ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
lowercase__ : Tuple = torch.tensor(
[[-41.5850, -4.4_4_4_0, -8.9_9_2_2], [-47.4322, -5.9_1_4_3, -1.7_3_4_0]] , device=__UpperCAmelCase )
assert torch.allclose(logits[0, :3, :3] , __UpperCAmelCase , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
lowercase__ : Union[str, Any] = torch.tensor(
[[-57.0109, -9.8_9_6_7, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=__UpperCAmelCase )
else:
# cast to same type
lowercase__ : Any = logits.dtype
assert torch.allclose(original_logits.to(__UpperCAmelCase ) , __UpperCAmelCase , atol=1E-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
    prompt = ''''''
    input_ids = tokenizer(prompt , return_tensors='''pt''' ).input_ids.to(device )
    original_outputs = original_model.generate({'''image''': original_pixel_values} )
    outputs = hf_model.generate(
        pixel_values , input_ids , do_sample=False , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print('''Original generation:''' , original_outputs )
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=True )
    output_text = [text.strip() for text in output_text]
    print('''HF generation:''' , output_text )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        processor.push_to_hub(F"""nielsr/{model_name}""" )
        hf_model.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
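# Example invocation (hypothetical script name and output path; the flags match
# the parser defined above):
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./converted/blip2-opt-2.7b \
#       --push_to_hub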
| 713 | '''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester :
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=99 , __lowerCAmelCase=32 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=37 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=16 , __lowerCAmelCase=2 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ) -> Dict:
lowercase__ : Union[str, Any] = parent
lowercase__ : Union[str, Any] = 13
lowercase__ : Dict = 7
lowercase__ : Optional[Any] = True
lowercase__ : List[Any] = True
lowercase__ : Optional[Any] = True
lowercase__ : Union[str, Any] = True
lowercase__ : Optional[Any] = 99
lowercase__ : Dict = 32
lowercase__ : Optional[int] = 2
lowercase__ : str = 4
lowercase__ : List[str] = 37
lowercase__ : Tuple = '''gelu'''
lowercase__ : Optional[int] = 0.1
lowercase__ : Optional[Any] = 0.1
lowercase__ : Dict = 512
lowercase__ : Optional[Any] = 16
lowercase__ : int = 2
lowercase__ : int = 0.0_2
lowercase__ : str = 3
lowercase__ : Optional[Any] = 4
lowercase__ : Optional[Any] = None
def _lowerCAmelCase( self ) -> str:
lowercase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Tuple = None
if self.use_input_mask:
lowercase__ : str = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Optional[Any] = None
if self.use_token_type_ids:
lowercase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : Any = None
lowercase__ : Union[str, Any] = None
lowercase__ : Any = None
if self.use_labels:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : str = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
lowercase__ : Any = TFRoFormerModel(config=__lowerCAmelCase )
lowercase__ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase__ : Union[str, Any] = [input_ids, input_mask]
lowercase__ : Union[str, Any] = model(__lowerCAmelCase )
lowercase__ : Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
lowercase__ : Optional[Any] = True
lowercase__ : str = TFRoFormerForCausalLM(config=__lowerCAmelCase )
lowercase__ : Dict = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase__ : Dict = model(__lowerCAmelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
lowercase__ : List[str] = TFRoFormerForMaskedLM(config=__lowerCAmelCase )
lowercase__ : int = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase__ : int = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
lowercase__ : Optional[int] = self.num_labels
lowercase__ : Tuple = TFRoFormerForSequenceClassification(config=__lowerCAmelCase )
lowercase__ : int = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase__ : Any = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
lowercase__ : Union[str, Any] = self.num_choices
lowercase__ : Dict = TFRoFormerForMultipleChoice(config=__lowerCAmelCase )
lowercase__ : List[str] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase__ : Optional[Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase__ : List[Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase__ : Dict = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase__ : str = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
lowercase__ : Optional[int] = self.num_labels
lowercase__ : List[str] = TFRoFormerForTokenClassification(config=__lowerCAmelCase )
lowercase__ : int = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase__ : Tuple = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
lowercase__ : Dict = TFRoFormerForQuestionAnswering(config=__lowerCAmelCase )
lowercase__ : Optional[int] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase__ : Tuple = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCAmelCase( self ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class TFRoFormerModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> Any:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def _lowerCAmelCase( self ) -> List[Any]:
lowercase__ : List[str] = TFRoFormerModelTester(self )
lowercase__ : List[str] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def _lowerCAmelCase( self ) -> Dict:
self.config_tester.run_common_tests()
def _lowerCAmelCase( self ) -> Tuple:
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> List[str]:
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> str:
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> int:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> str:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> Dict:
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> Any:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def _lowerCAmelCase( self ) -> Optional[Any]:
lowercase__ : List[Any] = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase( self ) -> List[str]:
        model = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        # TODO Replace vocab size
        vocab_size = 50000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape , expected_shape )
        print(output[:, :3, :3] )
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 1e-4
def _lowerCAmelCase( self ) -> List[str]:
        input_ids = tf.constant([[4, 10]] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emb = emba(input_ids.shape )
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
        tf.debugging.assert_near(emb , desired_weights , atol=self.tolerance )
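        # Sanity check on the constant above (computed by hand): at position 1,
        # the first sine entry is sin(1) ~= 0.8415 and its cosine partner is
        # cos(1) ~= 0.5403, matching the second row.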
def _lowerCAmelCase( self ) -> Union[str, Any]:
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
        emba([2, 16, 512] )
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 1e-4
def _lowerCAmelCase( self ) -> Tuple:
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        sinusoidal_pos = embed_positions([2, 16, 768] )[None, None, :, :]
        query_layer , key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
        expected_query = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ] )
        expected_key = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , expected_query , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , expected_key , atol=self.tolerance )
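        # Rough sketch of the rotary update exercised above (assumed form):
        #   q' = q * cos(theta) + rotate_every_two(q) * sin(theta)
        # where rotate_every_two maps feature pairs (x1, x2) to (-x2, x1); the
        # same rotation is applied to the key layer.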
| 428 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
    model_type = 'open-llama'
    def __init__( self , vocab_size=100_000 , hidden_size=4_096 , intermediate_size=11_008 , num_hidden_layers=32 , num_attention_heads=32 , hidden_act="silu" , max_position_embeddings=2_048 , initializer_range=0.02 , rms_norm_eps=1E-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , tie_word_embeddings=False , use_memory_efficient_attention=True , hidden_dropout_prob=0.1 , attention_dropout_prob=0.1 , use_stable_embedding=True , shared_input_output_embedding=True , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled legacy kwarg name is accepted on purpose for backward compatibility
        self.use_memory_efficient_attention = kwargs.pop(
            '''use_memorry_efficient_attention''' , use_memory_efficient_attention )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                f'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get('''type''' , None )
        rope_scaling_factor = self.rope_scaling.get('''factor''' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
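    # Example of a value that passes the validation above (hypothetical factor):
    #   OpenLlamaConfig(rope_scaling={'''type''': '''linear''', '''factor''': 2.0})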
| 695 |
"""simple docstring"""
def jaccard_similarity(set_a , set_b , alternative_union=False ):
    """Return the Jaccard similarity |A n B| / |A u B| of two sets (or lists/tuples)."""
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
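    # For the sets above the intersection {c, d, e} has 3 elements and the
    # union has 8, so this prints 3 / 8 = 0.375.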
| 695 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 1_00, ) -> float:
    x1 = x_start
    fx1 = fnc(x_start )
    area = 0.0
    for _ in range(steps ):
        # Approximate each small segment of the curve as linear and solve
        # for the trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2 )
        area += abs(fx2 + fx1 ) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
    def f(x: float) -> float:
        return x**3 + x**2

    print('''f(x) = x^3 + x^2''')
    print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
    i = 10
    while i <= 10_0000:
        print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
        i *= 10
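    # The estimates printed above converge on the area between |f| and the axis;
    # for f(x) = x^3 + x^2 on [-5, 5] that works out analytically to
    # 1376/12 + 198 ~= 312.67 (computed here, not taken from the source).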
| 711 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number ) + 1 ), 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def compute_nums(n: int ) -> list[int]:
    if not isinstance(n, int ):
        raise ValueError("""n must be an integer""" )
    if n <= 0:
        raise ValueError("""n must be >= 0""" )
    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
        if len(list_nums ) == n:
            return list_nums
    return []
def solution() -> int:
    return compute_nums(1 )[0]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
def gray_code(bit_count ):
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive" )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    #
    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )
    return sequence
def gray_code_sequence_string(bit_count ):
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        sequence.append("0" + smaller_sequence[i] )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        sequence.append("1" + smaller_sequence[i] )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
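    # Example: gray_code(2) returns [0, 1, 3, 2], i.e. the 2-bit sequence
    # 00, 01, 11, 10, in which consecutive codes differ in exactly one bit.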
| 655 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowerCAmelCase ( lowercase_ ):
    def setUp( self ):
        '''simple docstring'''
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"
    def _setup_pt_ckpt( self , save_dir ):
        '''simple docstring'''
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(save_dir )
    def _setup_tf_ckpt( self , save_dir ):
        '''simple docstring'''
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
        model_tf.save_pretrained(save_dir )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = "mock_framework"
# Framework provided - return whatever the user provides
lowercase__ = FeaturesManager.determine_framework(self.test_model , _lowercase )
self.assertEqual(_lowercase , _lowercase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
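        # PyTorch checkpoint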
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase )
self.assertEqual(_lowercase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase )
self.assertEqual(_lowercase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_lowercase ):
lowercase__ = FeaturesManager.determine_framework(_lowercase )
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
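        # TensorFlow not in environment -> use PyTorch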
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_tf_available" , _lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowercase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_torch_available" , _lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowercase , self.framework_tf )
# Both in environment -> use PyTorch
lowercase__ = MagicMock(return_value=_lowercase )
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_tf_available" , _lowercase ), patch(
"transformers.onnx.features.is_torch_available" , _lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowercase , self.framework_pt )
# Both not in environment -> raise error
lowercase__ = MagicMock(return_value=_lowercase )
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_tf_available" , _lowercase ), patch(
"transformers.onnx.features.is_torch_available" , _lowercase ):
with self.assertRaises(_lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
| 655 | 1 |
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
lowercase__ : List[str] = logging.getLogger()
def get_setup_file():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
    return args.f
def get_results( output_dir ):
    '''simple docstring'''
    results = {}
    path = os.path.join(output_dir , '''all_results.json''' )
    if os.path.exists(path ):
        with open(path , '''r''' ) as f:
            results = json.load(f )
    else:
        raise ValueError(F"""can't find {path}""" )
    return results
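# get_results reads back the `all_results.json` that the example scripts under
# test write out, e.g. {"eval_accuracy": 0.75, "train_loss": 0.4, ...}; the
# assertions in the test methods below consume exactly those keys.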
def is_cuda_and_apex_available():
    '''simple docstring'''
    is_using_cuda = torch.cuda.is_available() and torch_device == '''cuda'''
    return is_using_cuda and is_apex_available()
lowercase__ : int = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
@classmethod
def lowerCAmelCase__ ( cls : int ) ->List[str]:
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
UpperCAmelCase_ = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def lowerCAmelCase__ ( cls : Union[str, Any] ) ->List[Any]:
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCAmelCase__ ( self : str ) ->int:
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(UpperCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(UpperCAmelCase__ )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCAmelCase__ ( self : str ) ->str:
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(UpperCAmelCase__ )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCAmelCase__ ( self : Any ) ->Tuple:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
UpperCAmelCase_ = 7 if get_gpu_count() > 1 else 2
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(UpperCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCAmelCase__ ( self : Tuple ) ->str:
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(UpperCAmelCase__ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Any:
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(UpperCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCAmelCase__ ( self : Dict ) ->List[str]:
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(UpperCAmelCase__ )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCAmelCase__ ( self : List[str] ) ->Any:
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(UpperCAmelCase__ )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''translation_no_trainer''' ) ) )
@slow
def lowerCAmelCase__ ( self : List[Any] ) ->List[str]:
UpperCAmelCase_ = logging.StreamHandler(sys.stdout )
logger.addHandler(UpperCAmelCase__ )
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(UpperCAmelCase__ )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCAmelCase__ ( self : Dict ) ->Any:
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(UpperCAmelCase__ )
        # The base model scores about 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''image_classification_no_trainer''' ) ) )
| 43 | '''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = MvpTokenizer
lowerCAmelCase__ = MvpTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = filter_roberta_detectors
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
super().setUp()
UpperCAmelCase_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
UpperCAmelCase_ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
UpperCAmelCase_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
UpperCAmelCase_ = {'''unk_token''': '''<unk>'''}
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase__ ) )
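        # The vocab/merges written above form a tiny byte-level BPE fixture:
        # "\u0120" is the byte-level stand-in for a leading space, so e.g. the
        # "\u0120lowest" entry is the token for " lowest".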
def lowerCAmelCase__ ( self : Tuple , **UpperCAmelCase__ : List[str] ) ->Dict:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Optional[int] , **UpperCAmelCase__ : int ) ->Tuple:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] ) ->Union[str, Any]:
return "lower newer", "lower newer"
@cached_property
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''' )
@cached_property
def lowerCAmelCase__ ( self : Tuple ) ->Tuple:
return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''' )
@require_torch
def lowerCAmelCase__ ( self : Any ) ->Dict:
UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
UpperCAmelCase_ = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , max_length=len(UpperCAmelCase__ ) , padding=UpperCAmelCase__ , return_tensors='''pt''' )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
UpperCAmelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# Test that special tokens are reset
@require_torch
def lowerCAmelCase__ ( self : str ) ->int:
UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='''pt''' )
# check if input_ids are returned and no labels
self.assertIn('''input_ids''' , UpperCAmelCase__ )
self.assertIn('''attention_mask''' , UpperCAmelCase__ )
self.assertNotIn('''labels''' , UpperCAmelCase__ )
self.assertNotIn('''decoder_attention_mask''' , UpperCAmelCase__ )
@require_torch
def lowerCAmelCase__ ( self : Tuple ) ->Optional[Any]:
UpperCAmelCase_ = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ = tokenizer(text_target=UpperCAmelCase__ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def lowerCAmelCase__ ( self : List[str] ) ->int:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ = tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors='''pt''' )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
def lowerCAmelCase__ ( self : Dict ) ->Optional[int]:
UpperCAmelCase_ = ['''A long paragraph for summarization.''']
UpperCAmelCase_ = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , text_target=UpperCAmelCase__ , return_tensors='''pt''' )
UpperCAmelCase_ = inputs['''input_ids''']
UpperCAmelCase_ = inputs['''labels''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def lowerCAmelCase__ ( self : str ) ->Optional[Any]:
pass
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
UpperCAmelCase_ = '''A, <mask> AllenNLP sentence.'''
UpperCAmelCase_ = tokenizer_r.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ )
UpperCAmelCase_ = tokenizer_p.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
UpperCAmelCase_ = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
UpperCAmelCase_ = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
            # Rust correctly handles the space before the mask while python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
UpperCAmelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
UpperCAmelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 43 | 1 |
def counting_sort(collection ):
    '''simple docstring'''
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how many times each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors; counting_arr[i] now tells
    # us how many elements <= i there are in the collection
    for i in range(1 , counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0 , coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string(string ):
    '''simple docstring'''
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(counting_sort(unsorted))
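    # e.g. entering "3,1,2" prints [1, 2, 3]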
| 569 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
a = StableDiffusionSAGPipeline
a = TEXT_TO_IMAGE_PARAMS
a = TEXT_TO_IMAGE_BATCH_PARAMS
a = TEXT_TO_IMAGE_IMAGE_PARAMS
a = TEXT_TO_IMAGE_IMAGE_PARAMS
a = False
def _lowerCamelCase ( self ):
torch.manual_seed(0 )
A_ : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
A_ : List[Any] = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=a__ , set_alpha_to_one=a__ , )
torch.manual_seed(0 )
A_ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
A_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A_ : Optional[int] = CLIPTextModel(a__ )
A_ : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ : Union[str, Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """.""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 1.0,
            """sag_scale""": 1.0,
            """output_type""": """numpy""",
        }
        return inputs
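        # "sag_scale" above sets the Self-Attention Guidance strength; 0.0 would
        # disable SAG and fall back to plain classifier-free-guided sampling.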
def _lowerCamelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ):
A_ : Optional[int] = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
A_ : Tuple = sag_pipe.to(a__ )
sag_pipe.set_progress_bar_config(disable=a__ )
A_ : Optional[Any] = """."""
A_ : Optional[Any] = torch.manual_seed(0 )
A_ : str = sag_pipe(
[prompt] , generator=a__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
A_ : Tuple = output.images
A_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ : List[Any] = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def _lowerCamelCase ( self ):
A_ : Dict = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
A_ : List[str] = sag_pipe.to(a__ )
sag_pipe.set_progress_bar_config(disable=a__ )
A_ : List[str] = """."""
A_ : List[Any] = torch.manual_seed(0 )
A_ : List[str] = sag_pipe(
[prompt] , generator=a__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
A_ : Union[str, Any] = output.images
A_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ : str = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def _lowerCamelCase ( self ):
A_ : Optional[Any] = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
A_ : Tuple = sag_pipe.to(a__ )
sag_pipe.set_progress_bar_config(disable=a__ )
A_ : Optional[Any] = """."""
A_ : Any = torch.manual_seed(0 )
A_ : Optional[int] = sag_pipe(
[prompt] , width=768 , height=512 , generator=a__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , )
A_ : Optional[int] = output.images
assert image.shape == (1, 512, 768, 3)
| 569 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        # Transfer the weights of `self.src` to `self.dest` by performing a
        # forward pass with `x` and pairing up the traced, parametrized ops.
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
lowerCamelCase :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
lowerCamelCase :Optional[Any] = parser.parse_args()
lowerCamelCase :Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 716 |
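# Example invocation (illustrative; the script filename and paths are
# placeholders, not taken from this file):
#
#   python convert_resnet_original_to_pytorch.py \
#       --model_name resnet50 --pytorch_dump_folder_path ./converted-resnet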
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")
def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
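# Usage sketch (added for illustration, not part of the test suite): the
# `offline` helper is an ordinary context manager, so it can also be used ad
# hoc outside of pytest, e.g.
#
#     with offline(OfflineSimulationMode.CONNECTION_FAILS):
#         requests.request("GET", "https://huggingface.co")  # raises ConnectionError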
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    # A word's value is the sum of the alphabetical positions of its letters;
    # the word is a "triangle word" if that value is a triangular number.
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
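# Worked example (added for illustration; "SKY" is the example from the Project
# Euler 42 problem statement): S=19, K=11, Y=25, so the word value is
# 19 + 11 + 25 = 55, and 55 = 10 * 11 / 2 is the 10th triangular number, which
# makes "SKY" a triangle word.
if __name__ == "__main__":
    assert sum(ord(ch) - 64 for ch in "SKY") == 55
    assert 55 in TRIANGULAR_NUMBERS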
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
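# Example invocation (illustrative): once registered under the `accelerate` CLI,
# this subcommand is typically run as
#
#   accelerate test --config_file path/to/default_config.yaml
#
# which launches test_script.py through `accelerate-launch` with that config.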
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
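# Minimal sketch (added for illustration, not part of the original module) of a
# concrete reader built on AbstractDatasetReader. It simply delegates to the
# public `load_dataset` entry point; the "json" builder name used here is an
# assumption for demonstration purposes.
class JsonReaderSketch(AbstractDatasetReader):
    def read(self):
        from datasets import load_dataset  # local import keeps the sketch self-contained

        return load_dataset(
            "json",
            data_files=self.path_or_paths,
            split=self.split,
            features=self.features,
            cache_dir=self.cache_dir,
            streaming=self.streaming,
            **self.kwargs,
        )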
"""simple docstring"""
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        # Assign `val` to every position in [a, b], deferring child updates
        # through the lazy array until they are actually visited.
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        # Return the maximum over positions [a, b], pushing down any pending
        # lazy assignments along the way.
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
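# Note on the indexing scheme (added for clarity): the tree is 1-indexed, so a
# node i has children left(i) = 2*i and right(i) = 2*i + 1. For size = 15, node
# 1 covers [1, 15], node 2 covers [1, 8] and node 3 covers [9, 15]; build,
# update and query all recurse along exactly this split.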
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
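# Usage sketch (added for illustration; the checkpoint name is an assumption,
# any Chinese-CLIP checkpoint that ships a processor config would do):
#
#     from transformers import ChineseCLIPProcessor
#     from PIL import Image
#
#     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#     inputs = processor(text=["一只猫"], images=Image.open("cat.png"), return_tensors="pt")
#     # `inputs` now carries both the tokenizer's `input_ids` and the image
#     # processor's `pixel_values`, merged as in `__call__` above.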
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--openai_checkpoint_folder_path',
default=None,
type=str,
required=True,
help='Path to the TensorFlow checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--openai_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
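# Example invocation (illustrative; the script filename and paths are
# placeholders):
#
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path /path/to/openai/checkpoint \
#       --pytorch_dump_folder_path /path/to/output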
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Euclid's algorithm: after the loop, `divisor` holds the gcd
        dividend, divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")
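# Worked trace (added for illustration) for decimal_to_fraction("6.25"): two
# fractional digits give numerator = 625 and denominator = 100; the Euclidean
# loop computes 100 % 625 = 100, then 625 % 100 = 25, then 100 % 25 = 0, so the
# divisor ends at gcd = 25 and the reduced result is (625/25, 100/25) = (25, 4).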
'''simple docstring'''
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
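# The model being fitted above (noted for clarity): a linear hypothesis
# h(x) = p0 + p1*x1 + p2*x2 + p3*x3 over parameter_vector = [p0, p1, p2, p3],
# with the batch gradient-descent update
#   p_j <- p_j - LEARNING_RATE * (1/m) * sum_i (h(x_i) - y_i) * x_{i,j}
# (x_{i,0} = 1 for the intercept), iterated until numpy.allclose reports
# convergence of the parameter vector.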
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
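# Note (added for clarity): `out_features` refers to stages by their names in
# `stage_names`, while `out_indices` addresses the same stages positionally,
# with negative indices counting from the end; the helpers above keep the two
# representations aligned, which is exactly what these assertions exercise.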
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    """
    >>> test_circular_linked_list()
    """
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
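# Complexity note (added for clarity): insert_nth and delete_nth walk the list
# from the head, and their bounds checks call len(self), which itself iterates
# the whole list; every insertion and deletion is therefore O(n), even at the
# head.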
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
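# Worked example of the rolling hash update (added for illustration), with a
# tiny base for readability: take base 10 instead of 256 and treat digits as
# their own values. Hashing "12" in "123" gives 1*10 + 2 = 12; rolling one step
# removes the leading 1 and appends the 3: (12 - 1*10) * 10 + 3 = 23, exactly
# the hash of "23" computed directly. Here 10 plays the role of modulus_power,
# i.e. base ** (pattern_length - 1).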
def hex_to_bin(hex_num: str) -> int:
    """
    Convert a hexadecimal value to its binary equivalent, returned as an int
    whose decimal digits are the binary digits, e.g.:

    >>> hex_to_bin("AC")
    10101100
    >>> hex_to_bin("-ac")
    -10101100
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
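# Worked example (added for illustration): hex_to_bin("AC") parses to 172 and
# the loop collects its bits least-significant first while prepending, so the
# result reads most-significant first: the integer 10101100. For "-0x10" the
# sign is stripped, int("0x10", 16) parses to 16, and the function returns
# -10000.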
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test", subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test", subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
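# Note (added for clarity): the "onestep" tests above exercise single-step
# consistency sampling (num_inference_steps=1 with no explicit schedule), while
# the multistep tests pass the explicit timestep schedule [22, 0] to the
# CMStochasticIterativeScheduler; both paths must yield images of the same shape.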
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase ( a__ ):
'''simple docstring'''
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , 'num_attention_heads' ) )
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=640 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_="silu" , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=None , ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = last_hidden_size
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = hidden_act
lowerCamelCase_ = conv_kernel_size
lowerCamelCase_ = output_stride
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = classifier_dropout_prob
lowerCamelCase_ = use_labels
lowerCamelCase_ = is_training
lowerCamelCase_ = num_labels
lowerCamelCase_ = initializer_range
lowerCamelCase_ = scope
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
lowerCamelCase_ = MobileViTModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowerCamelCase_ = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = MobileViTForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowerCamelCase_ = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = MobileViTForSemanticSegmentation(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowerCamelCase_ = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowerCamelCase_ = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (
{
"""feature-extraction""": MobileViTModel,
"""image-classification""": MobileViTForImageClassification,
"""image-segmentation""": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = MobileViTModelTester(self )
lowerCamelCase_ = MobileViTConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(lowerCAmelCase__ )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
lowerCamelCase_ = outputs.hidden_states
lowerCamelCase_ = 5
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
lowerCamelCase_ = 2
for i in range(len(lowerCAmelCase__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 709 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
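# Segmentation masks are reduced to a 10-char md5 digest plus their shape so the
# expected outputs in the tests below stay human-readable.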
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_444},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_132},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_053},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_967},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_909},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_879},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_834},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_716},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_612},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_599},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_552},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_532},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_516},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_499},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_483},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_464},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_408},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_335},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_326},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_262},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_999},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_986},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_984},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_873},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_871}
] , )
# fmt: on
    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_444},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_210},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_132},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_053},
] , )
| 384 | 0 |
"""simple docstring"""
import base64


def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    return base64.b85decode(a85encoded).decode("utf-8")
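# Quick sanity check with Python's b85 alphabet (value recalled from the
# upstream doctest, so treat it as illustrative):
# base85_encode("Hello World!") == b"NM&qnZy;B1a%^NF"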
if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
| 695 |
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])
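    # xla_spawn.py forks one Python process per TPU core (8 below) and runs the
    # given script in each, analogous to torch.distributed.launch for TPUs.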
    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 695 | 1 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
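# The parametrize grids above pair dataset sizes below and above the configured
# IN_MEMORY_MAX_SIZE, including the default of 0 where nothing counts as small.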
| 700 |
def move_tower(height, from_pole, to_pole, with_pole):
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)
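# A tower of height n takes 2**n - 1 moves; e.g. move_tower(2, "A", "B", "C") prints:
#   moving disk from A to C
#   moving disk from A to B
#   moving disk from C to B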
def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)
def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")
if __name__ == "__main__":
main()
| 447 | 0 |
'''simple docstring'''
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}
def next_term(a_i, k, i, n):
    # a_i holds the digits of the current term, least-significant first,
    # split as a(i) = b * 10^k + c.
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))  # digitsum(b)
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)
def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    # adds addend to the digit array in place, starting at index k
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 565 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 683 | 0 |
'''simple docstring'''
from collections import deque
class Process:
    def __init__(self, process_name, arrival_time, burst_time):
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(self, number_of_queues, time_slices, queue, current_time):
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()
    def calculate_sequence_of_finish_queue(self):
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue):
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue):
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue):
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue):
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process):
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue):
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue, time_slice):
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self):
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue
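# With the configuration below (RR with slice 17, RR with slice 25, then FCFS),
# the processes finish in the order P2 (its 17-burst fits the first slice),
# then P4, P1 and P3.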
if __name__ == "__main__":
import doctest
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
    print(
        f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"""
    )
# print completion times of processes(P1, P2, P3, P4)
    print(
        f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"""
    )
# print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"""
    )
# print sequence of finished processes
print(
f'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
| 708 |
'''simple docstring'''
def hamming(n_element: int) -> list:
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
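# Example: hamming(10) -> [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]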
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
print('''Formula of Hamming Number Series => 2^i * 3^j * 5^k''')
    hamming_numbers = hamming(int(n))
print('''-----------------------------------------------------''')
print(f'''The list with nth numbers is: {hamming_numbers}''')
print('''-----------------------------------------------------''')
| 606 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 133 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    """Converts the given big-endian 32-char bit string to little endian."""
    if len(string_aa) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Converts the given non-negative int to a little-endian hex representation."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Converts the message to a bit string and pads it to a multiple of 512 chars."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Splits the bit string into 512-char blocks and yields each block as a
    list of 16 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_aa(i: int) -> int:
    """Flips every bit of the given 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_aa(a: int, b: int) -> int:
    """Adds two numbers modulo 2^32."""
    return (a + b) % 2**32


def left_rotate_aa(i: int, shift: int) -> int:
    """Rotates the bits of a 32-bit integer left by the given amount."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Returns the 32-char hex MD5 digest of the given message."""
    # Convert to bit string, add padding and append message length
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    aa = 0x67452301
    ba = 0xEFCDAB89
    ca = 0x98BADCFE
    da = 0x10325476
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_aa(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_aa(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))

        # Add hashed chunk to running total
        aa = sum_aa(aa, a)
        ba = sum_aa(ba, b)
        ca = sum_aa(ca, c)
        da = sum_aa(da, d)

    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
    return digest
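# Example: md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e", the standard MD5
# digest of the empty message.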
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | 0 |
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))
    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            # saving a BetterTransformer-converted model is expected to fail
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 721 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        # Fetch the profile page and extract the embedded user-info dict
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""") | 664 | 0 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=("train",) ):
'''simple docstring'''
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
for split in splits:
__lowerCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
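# Image and audio features map to smaller Parquet row-group sizes so that a
# single row group stays cheap to decode into memory.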
| 636 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }

        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertTrue(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
| 347 | 0 |
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def SCREAMING_SNAKE_CASE__ ( snake_case : str = "dhaka" , snake_case : int = 5 ) -> int:
"""simple docstring"""
a : List[str] = min(snake_case , 50 ) # Prevent abuse!
a : Tuple = {
'q': query,
'tbm': 'isch',
'hl': 'en',
'ijn': '0',
}
a : Optional[Any] = requests.get('https://www.google.com/search' , params=snake_case , headers=snake_case )
a : Optional[int] = BeautifulSoup(html.text , 'html.parser' )
a : Optional[Any] = ''.join(
re.findall(R'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
a : Any = json.dumps(snake_case )
a : Optional[Any] = json.loads(snake_case )
a : List[str] = re.findall(
R'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , snake_case , )
if not matched_google_image_data:
return 0
a : Optional[int] = re.sub(
R'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(snake_case ) , )
a : Any = re.findall(
R'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , snake_case , )
for index, fixed_full_res_image in enumerate(snake_case ):
if index >= max_images:
return index
a : Optional[int] = bytes(snake_case , 'ascii' ).decode(
'unicode-escape' )
a : Any = bytes(snake_case , 'ascii' ).decode(
'unicode-escape' )
a : int = urllib.request.build_opener()
a : Optional[int] = [
(
'User-Agent',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
)
]
urllib.request.install_opener(snake_case )
a : int = F"""query_{query.replace(' ' , '_' )}"""
if not os.path.exists(snake_case ):
os.makedirs(snake_case )
urllib.request.urlretrieve( # noqa: S310
snake_case , F"""{path_name}/original_size_img_{index}.jpg""" )
return index
if __name__ == "__main__":
try:
UpperCamelCase : Optional[Any] = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print("""Please provide a search term.""")
raise
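# --- Hedged usage sketch (added for illustration; not part of the original script) ---
# Besides the CLI entry point above, the scraper can be driven programmatically. The
# wrapper below performs live network requests, so only call it deliberately; the demo
# query and count are made up.
def _demo_scrape(query: str = "cats", n: int = 3) -> None:
    count = download_images_from_google_query(query, n)
    print(f"{count} images saved under query_{query.replace(' ', '_')}/")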
| 710 | '''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : int = """▁"""
UpperCamelCase : int = {"""vocab_file""": """spiece.model"""}
UpperCamelCase : Optional[int] = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}
}
UpperCamelCase : Optional[int] = {
"""google/pegasus-xsum""": 512,
}
UpperCamelCase : List[Any] = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Tuple = VOCAB_FILES_NAMES
A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Any = ["input_ids", "attention_mask"]
def __init__( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str="<pad>" , UpperCAmelCase_ : str="</s>" , UpperCAmelCase_ : Any="<unk>" , UpperCAmelCase_ : List[str]="<mask_2>" , UpperCAmelCase_ : int="<mask_1>" , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Union[str, Any]=1_0_3 , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
a : Optional[Any] = offset
if additional_special_tokens is not None:
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise TypeError(
f"""additional_special_tokens should be of type {type(UpperCAmelCase_)}, but is"""
f""" {type(UpperCAmelCase_)}""")
a : List[Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"""<unk_{i}>""" for i in range(len(UpperCAmelCase_) , self.offset - 1)
]
if len(set(UpperCAmelCase_)) != len(UpperCAmelCase_):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""")
a : Union[str, Any] = additional_special_tokens_extended
else:
a : List[str] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset)]
a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token_sent=UpperCAmelCase_ , offset=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
a : List[Any] = mask_token_sent
a : Optional[Any] = vocab_file
a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(UpperCAmelCase_)
# add special tokens to encoder dict
a : Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
})
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1)})
a : Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return len(self.sp_model) + self.offset
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Optional[int] = {self.convert_ids_to_tokens(UpperCAmelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : int):
"""simple docstring"""
a : Optional[int] = self.__dict__.copy()
a : List[str] = None
return state
def __setstate__( self : Optional[int] , UpperCAmelCase_ : Any):
"""simple docstring"""
a : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
a : str = {}
a : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : str):
"""simple docstring"""
return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : str):
"""simple docstring"""
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
a : Union[str, Any] = self.sp_model.piece_to_id(UpperCAmelCase_)
return sp_id + self.offset
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : int):
"""simple docstring"""
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
a : Dict = self.sp_model.IdToPiece(index - self.offset)
return token
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
a : Optional[int] = []
a : Dict = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(UpperCAmelCase_) + token
a : Dict = []
else:
current_sub_tokens.append(UpperCAmelCase_)
out_string += self.sp_model.decode(UpperCAmelCase_)
return out_string.strip()
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : str=False):
"""simple docstring"""
return 1
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
a : Tuple = set(self.all_special_ids) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : List , UpperCAmelCase_ : Optional[List] = None , UpperCAmelCase_ : bool = False):
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(UpperCAmelCase_)
elif token_ids_a is None:
return self._special_token_mask(UpperCAmelCase_) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a) + [1]
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any]=None):
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase_):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
a : int = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , UpperCAmelCase_)
elif not os.path.isfile(self.vocab_file):
with open(UpperCAmelCase_ , 'wb') as fi:
a : Any = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_)
return (out_vocab_file,)
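# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Assumes the class above is exposed as `PegasusTokenizer` and that the
# "google/pegasus-xsum" checkpoint is reachable; both are assumptions here.
if __name__ == "__main__":
    from transformers import PegasusTokenizer
    tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
    ids = tok("A tiny round-trip example.").input_ids
    # build_inputs_with_special_tokens appends eos_token_id, so sequences end with it
    assert ids[-1] == tok.eos_token_id
    print(tok.convert_ids_to_tokens(ids))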
| 610 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class __lowercase :
def __init__( self : int ,A : Tuple ,A : Dict=13 ,A : Optional[Any]=7 ,A : Optional[int]=True ,A : Union[str, Any]=True ,A : Optional[int]=True ,A : Tuple=True ,A : str=99 ,A : int=[1, 1, 2] ,A : Tuple=1 ,A : List[Any]=32 ,A : List[str]=4 ,A : Any=8 ,A : Optional[int]=37 ,A : Any="gelu_new" ,A : Dict=0.1 ,A : Dict=0.1 ,A : Any=0.0 ,A : Optional[int]=512 ,A : List[str]=3 ,A : Optional[int]=0.0_2 ,A : Optional[Any]=3 ,A : Tuple=4 ,A : Union[str, Any]=None ,A : List[Any]=False ,):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = parent
UpperCAmelCase__ : int = batch_size
UpperCAmelCase__ : Any = seq_length
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : Dict = use_input_mask
UpperCAmelCase__ : Any = use_token_type_ids
UpperCAmelCase__ : Union[str, Any] = use_labels
UpperCAmelCase__ : Any = vocab_size
UpperCAmelCase__ : int = block_sizes
UpperCAmelCase__ : Tuple = num_decoder_layers
UpperCAmelCase__ : List[str] = d_model
UpperCAmelCase__ : Tuple = n_head
UpperCAmelCase__ : Dict = d_head
UpperCAmelCase__ : str = d_inner
UpperCAmelCase__ : int = hidden_act
UpperCAmelCase__ : Dict = hidden_dropout
UpperCAmelCase__ : Any = attention_dropout
UpperCAmelCase__ : Optional[int] = activation_dropout
UpperCAmelCase__ : List[Any] = max_position_embeddings
UpperCAmelCase__ : Union[str, Any] = type_vocab_size
UpperCAmelCase__ : Tuple = 2
UpperCAmelCase__ : Any = num_labels
UpperCAmelCase__ : Tuple = num_choices
UpperCAmelCase__ : List[Any] = scope
UpperCAmelCase__ : Union[str, Any] = initializer_std
# Used in the tests to check the size of the first attention layer
UpperCAmelCase__ : Any = n_head
# Used in the tests to check the size of the first hidden state
UpperCAmelCase__ : Optional[Any] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
UpperCAmelCase__ : int = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
UpperCAmelCase__ : Tuple = self.num_hidden_layers + 2
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase__ : str = None
if self.use_input_mask:
UpperCAmelCase__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Dict = None
if self.use_token_type_ids:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase__ : str = None
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : int = None
if self.use_labels:
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase__ : Tuple = FunnelConfig(
vocab_size=self.vocab_size ,block_sizes=self.block_sizes ,num_decoder_layers=self.num_decoder_layers ,d_model=self.d_model ,n_head=self.n_head ,d_head=self.d_head ,d_inner=self.d_inner ,hidden_act=self.hidden_act ,hidden_dropout=self.hidden_dropout ,attention_dropout=self.attention_dropout ,activation_dropout=self.activation_dropout ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_std=self.initializer_std ,)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def __lowercase ( self : str ,A : int ,A : str ,A : str ,A : int ,A : Dict ,A : Union[str, Any] ,A : Dict ,):
'''simple docstring'''
UpperCAmelCase__ : int = TFFunnelModel(config=a__ )
UpperCAmelCase__ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase__ : Optional[int] = model(a__ )
UpperCAmelCase__ : Optional[int] = [input_ids, input_mask]
UpperCAmelCase__ : str = model(a__ )
UpperCAmelCase__ : str = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Any = TFFunnelModel(config=a__ )
UpperCAmelCase__ : Union[str, Any] = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Tuple = TFFunnelModel(config=a__ )
UpperCAmelCase__ : int = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
def __lowercase ( self : Union[str, Any] ,A : List[str] ,A : str ,A : List[str] ,A : List[Any] ,A : Tuple ,A : Dict ,A : str ,):
'''simple docstring'''
UpperCAmelCase__ : List[str] = TFFunnelBaseModel(config=a__ )
UpperCAmelCase__ : int = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase__ : int = model(a__ )
UpperCAmelCase__ : Tuple = [input_ids, input_mask]
UpperCAmelCase__ : Optional[int] = model(a__ )
UpperCAmelCase__ : Optional[int] = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 2, self.d_model) )
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : Optional[int] = TFFunnelBaseModel(config=a__ )
UpperCAmelCase__ : Optional[int] = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 3, self.d_model) )
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : Any = TFFunnelBaseModel(config=a__ )
UpperCAmelCase__ : Any = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 2, self.d_model) )
def __lowercase ( self : int ,A : Tuple ,A : Union[str, Any] ,A : List[Any] ,A : Any ,A : Dict ,A : List[Any] ,A : Optional[Any] ,):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = TFFunnelForPreTraining(config=a__ )
UpperCAmelCase__ : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase__ : Dict = model(a__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length) )
def __lowercase ( self : Optional[int] ,A : List[str] ,A : Tuple ,A : Tuple ,A : List[str] ,A : List[Any] ,A : List[Any] ,A : Optional[Any] ,):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = TFFunnelForMaskedLM(config=a__ )
UpperCAmelCase__ : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase__ : int = model(a__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase ( self : int ,A : List[str] ,A : Tuple ,A : int ,A : Dict ,A : Optional[Any] ,A : Optional[Any] ,A : Dict ,):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.num_labels
UpperCAmelCase__ : Any = TFFunnelForSequenceClassification(config=a__ )
UpperCAmelCase__ : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase__ : Tuple = model(a__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowercase ( self : List[str] ,A : Any ,A : List[Any] ,A : str ,A : List[str] ,A : List[Any] ,A : List[Any] ,A : Tuple ,):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.num_choices
UpperCAmelCase__ : Dict = TFFunnelForMultipleChoice(config=a__ )
UpperCAmelCase__ : Any = tf.tile(tf.expand_dims(a__ ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase__ : Dict = tf.tile(tf.expand_dims(a__ ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase__ : List[str] = tf.tile(tf.expand_dims(a__ ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase__ : Any = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
UpperCAmelCase__ : Union[str, Any] = model(a__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def __lowercase ( self : str ,A : int ,A : List[Any] ,A : List[Any] ,A : Optional[Any] ,A : Dict ,A : Union[str, Any] ,A : Dict ,):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.num_labels
UpperCAmelCase__ : Optional[int] = TFFunnelForTokenClassification(config=a__ )
UpperCAmelCase__ : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase__ : Dict = model(a__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def __lowercase ( self : List[str] ,A : Optional[int] ,A : Dict ,A : Union[str, Any] ,A : Optional[Any] ,A : int ,A : int ,A : Union[str, Any] ,):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = TFFunnelForQuestionAnswering(config=a__ )
UpperCAmelCase__ : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase__ : Optional[int] = model(a__ )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.prepare_config_and_inputs()
        UpperCAmelCase__ : Dict = config_and_inputs
UpperCAmelCase__ : List[str] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class __lowercase ( lowercase_ , lowercase_ , unittest.TestCase ):
snake_case_ = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case_ = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case_ = False
snake_case_ = False
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Any = TFFunnelModelTester(self )
UpperCAmelCase__ : Tuple = ConfigTester(self ,config_class=a__ )
def __lowercase ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a__ )
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a__ )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a__ )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a__ )
@require_tf
class __lowercase ( lowercase_ , unittest.TestCase ):
snake_case_ = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
snake_case_ = False
snake_case_ = False
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = TFFunnelModelTester(self ,base=a__ )
UpperCAmelCase__ : Any = ConfigTester(self ,config_class=a__ )
def __lowercase ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*a__ )
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a__ )
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*a__ )
| 65 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
lowerCamelCase__ = 50_0000
lowerCamelCase__ , lowerCamelCase__ = os.path.split(__file__)
lowerCamelCase__ = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def UpperCamelCase ( snake_case__ : datasets.Dataset ,**snake_case__ : Any ):
'''simple docstring'''
__snake_case :List[str] = dataset.map(**snake_case__ )
@get_duration
def UpperCamelCase ( snake_case__ : datasets.Dataset ,**snake_case__ : Dict ):
'''simple docstring'''
__snake_case :int = dataset.filter(**snake_case__ )
def UpperCamelCase ( ):
'''simple docstring'''
__snake_case :str = {"""num examples""": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case :Dict = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
__snake_case :Optional[int] = generate_example_dataset(
os.path.join(snake_case__ ,"""dataset.arrow""" ) ,snake_case__ ,num_examples=snake_case__ )
__snake_case :str = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" ,use_fast=snake_case__ )
    def tokenize(examples : Optional[int] ):
return tokenizer(examples["""text"""] )
__snake_case :Optional[Any] = map(snake_case__ )
__snake_case :Optional[Any] = map(snake_case__ ,batched=snake_case__ )
__snake_case :Optional[Any] = map(snake_case__ ,function=lambda snake_case__ : None ,batched=snake_case__ )
with dataset.formatted_as(type="""numpy""" ):
__snake_case :int = map(snake_case__ ,function=lambda snake_case__ : None ,batched=snake_case__ )
with dataset.formatted_as(type="""pandas""" ):
__snake_case :List[Any] = map(snake_case__ ,function=lambda snake_case__ : None ,batched=snake_case__ )
with dataset.formatted_as(type="""torch""" ,columns="""numbers""" ):
__snake_case :str = map(snake_case__ ,function=lambda snake_case__ : None ,batched=snake_case__ )
with dataset.formatted_as(type="""tensorflow""" ,columns="""numbers""" ):
__snake_case :Optional[int] = map(snake_case__ ,function=lambda snake_case__ : None ,batched=snake_case__ )
__snake_case :Dict = map(snake_case__ ,function=snake_case__ ,batched=snake_case__ )
__snake_case :Tuple = filter(snake_case__ )
    # Activate later when the tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(snake_case__ ,"""wb""" ) as f:
f.write(json.dumps(snake_case__ ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
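# --- Hedged follow-up sketch (illustration; not part of the original benchmark) ---
# The benchmark serializes a {description: seconds} mapping (plus "num examples") to
# the results path computed above; this helper just pretty-prints such a file,
# slowest entry first. The helper name and signature are made up.
def report_results(path: str) -> None:
    with open(path) as f:
        times = json.load(f)
    print("num examples:", times.pop("num examples", None))
    for name, seconds in sorted(times.items(), key=lambda kv: kv[1], reverse=True):
        print(f"{seconds:8.3f}s  {name}")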
| 455 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ : int = logging.get_logger(__name__)
def UpperCAmelCase__ ( A__ ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__ = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
lowerCamelCase__ = 128
elif "12-12" in model_name:
lowerCamelCase__ = 12
lowerCamelCase__ = 12
elif "14-14" in model_name:
lowerCamelCase__ = 14
lowerCamelCase__ = 14
elif "16-16" in model_name:
lowerCamelCase__ = 16
lowerCamelCase__ = 16
else:
raise ValueError("Model not supported" )
lowerCamelCase__ = "huggingface/label-files"
if "speech-commands" in model_name:
lowerCamelCase__ = 35
lowerCamelCase__ = "speech-commands-v2-id2label.json"
else:
lowerCamelCase__ = 527
lowerCamelCase__ = "audioset-id2label.json"
lowerCamelCase__ = json.load(open(hf_hub_download(A__ , A__ , repo_type="dataset" ) , "r" ) )
    lowerCamelCase__ = {int(k ): v for k, v in idalabel.items()}
lowerCamelCase__ = idalabel
lowerCamelCase__ = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase__ ( A__ ) -> str:
"""simple docstring"""
if "module.v" in name:
lowerCamelCase__ = name.replace("module.v" , "audio_spectrogram_transformer" )
if "cls_token" in name:
lowerCamelCase__ = name.replace("cls_token" , "embeddings.cls_token" )
if "dist_token" in name:
lowerCamelCase__ = name.replace("dist_token" , "embeddings.distillation_token" )
if "pos_embed" in name:
lowerCamelCase__ = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowerCamelCase__ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
# transformer blocks
if "blocks" in name:
lowerCamelCase__ = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowerCamelCase__ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowerCamelCase__ = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowerCamelCase__ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCamelCase__ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCamelCase__ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCamelCase__ = name.replace("mlp.fc2" , "output.dense" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
lowerCamelCase__ = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" )
# classifier head
if "module.mlp_head.0" in name:
lowerCamelCase__ = name.replace("module.mlp_head.0" , "classifier.layernorm" )
if "module.mlp_head.1" in name:
lowerCamelCase__ = name.replace("module.mlp_head.1" , "classifier.dense" )
return name
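# --- Hedged worked example (illustration, derived from the rules above) ---
# The helper above (conventionally called `rename_key` in conversion scripts, though
# it carries an obfuscated name here) rewrites a checkpoint key rule by rule, e.g.:
#     "module.v.blocks.0.attn.proj.weight"
#     -> "audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight"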
def UpperCAmelCase__ ( A__ , A__ ) -> Union[str, Any]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
        lowerCamelCase__ = orig_state_dict.pop(key )
if "qkv" in key:
lowerCamelCase__ = key.split("." )
lowerCamelCase__ = int(key_split[3] )
lowerCamelCase__ = config.hidden_size
if "weight" in key:
lowerCamelCase__ = val[:dim, :]
lowerCamelCase__ = val[dim : dim * 2, :]
lowerCamelCase__ = val[-dim:, :]
else:
lowerCamelCase__ = val[:dim]
lowerCamelCase__ = val[dim : dim * 2]
lowerCamelCase__ = val[-dim:]
else:
lowerCamelCase__ = val
return orig_state_dict
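# --- Hedged worked example (illustration; not part of the conversion logic) ---
# The "qkv" branch above slices a fused attention projection of shape
# (3 * hidden_size, hidden_size) into query/key/value blocks. A toy check:
def _demo_qkv_split(dim: int = 4) -> None:
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)
    # the three blocks are disjoint and cover the fused matrix exactly
    assert torch.equal(torch.cat([q, k, v], dim=0), fused)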
def UpperCAmelCase__ ( A__ ) -> Dict:
"""simple docstring"""
lowerCamelCase__ = [
"module.v.head.weight",
"module.v.head.bias",
"module.v.head_dist.weight",
"module.v.head_dist.bias",
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
@torch.no_grad()
def UpperCAmelCase__ ( A__ , A__ , A__=False ) -> str:
"""simple docstring"""
lowerCamelCase__ = get_audio_spectrogram_transformer_config(A__ )
lowerCamelCase__ = {
"ast-finetuned-audioset-10-10-0.4593": (
"https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.450": (
"https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448": (
"https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448-v2": (
"https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
),
"ast-finetuned-audioset-12-12-0.447": (
"https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
),
"ast-finetuned-audioset-14-14-0.443": (
"https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
),
"ast-finetuned-audioset-16-16-0.442": (
"https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
),
"ast-finetuned-speech-commands-v2": (
"https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
),
}
# load original state_dict
lowerCamelCase__ = model_name_to_url[model_name]
lowerCamelCase__ = torch.hub.load_state_dict_from_url(A__ , map_location="cpu" )
# remove some keys
remove_keys(A__ )
# rename some keys
lowerCamelCase__ = convert_state_dict(A__ , A__ )
# load 🤗 model
lowerCamelCase__ = ASTForAudioClassification(A__ )
model.eval()
model.load_state_dict(A__ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
lowerCamelCase__ = -4.2677393 if "speech-commands" not in model_name else -6.845978
lowerCamelCase__ = 4.5689974 if "speech-commands" not in model_name else 5.5654526
lowerCamelCase__ = 1024 if "speech-commands" not in model_name else 128
lowerCamelCase__ = ASTFeatureExtractor(mean=A__ , std=A__ , max_length=A__ )
if "speech-commands" in model_name:
lowerCamelCase__ = load_dataset("speech_commands" , "v0.02" , split="validation" )
lowerCamelCase__ = dataset[0]["audio"]["array"]
else:
lowerCamelCase__ = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , )
lowerCamelCase__ , lowerCamelCase__ = torchaudio.load(A__ )
lowerCamelCase__ = waveform.squeeze().numpy()
lowerCamelCase__ = feature_extractor(A__ , sampling_rate=1_6000 , return_tensors="pt" )
# forward pass
lowerCamelCase__ = model(**A__ )
lowerCamelCase__ = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
lowerCamelCase__ = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
lowerCamelCase__ = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
lowerCamelCase__ = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
lowerCamelCase__ = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
lowerCamelCase__ = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
lowerCamelCase__ = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
lowerCamelCase__ = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
lowerCamelCase__ = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("Unknown model name" )
if not torch.allclose(logits[0, :3] , A__ , atol=1E-4 ):
raise ValueError("Logits don't match" )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(A__ ).mkdir(exist_ok=A__ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(A__ )
print(f'Saving feature extractor to {pytorch_dump_folder_path}' )
feature_extractor.save_pretrained(A__ )
if push_to_hub:
print("Pushing model and feature extractor to the hub..." )
model.push_to_hub(f'MIT/{model_name}' )
feature_extractor.push_to_hub(f'MIT/{model_name}' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
SCREAMING_SNAKE_CASE_ : str = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 712 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _A :
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=99 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=36 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=512 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=None , ) -> Optional[Any]:
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_token_type_ids
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = embedding_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_hidden_groups
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = num_labels
lowerCamelCase__ = num_choices
lowerCamelCase__ = scope
def _lowerCamelCase ( self ) -> Any:
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
if self.use_token_type_ids:
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self ) -> List[str]:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def _lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
lowerCamelCase__ = AlbertModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
lowerCamelCase__ = AlbertForPreTraining(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCamelCase__ = model(
SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , sentence_order_label=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def _lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any:
lowerCamelCase__ = AlbertForMaskedLM(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any:
lowerCamelCase__ = AlbertForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCamelCase__ = model(
SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = AlbertForSequenceClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = AlbertForTokenClassification(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
lowerCamelCase__ = self.num_choices
lowerCamelCase__ = AlbertForMultipleChoice(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase__ = model(
SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self ) -> Union[str, Any]:
lowerCamelCase__ = self.prepare_config_and_inputs()
        ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) = config_and_inputs
lowerCamelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _A ( __a , __a , unittest.TestCase ):
__a = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
__a = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
__a = True
def _lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ) -> Optional[Any]:
lowerCamelCase__ = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ )
return inputs_dict
def _lowerCamelCase ( self ) -> Union[str, Any]:
lowerCamelCase__ = AlbertModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _lowerCamelCase ( self ) -> Any:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ) -> int:
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _lowerCamelCase ( self ) -> Optional[Any]:
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE__ )
def _lowerCamelCase ( self ) -> Dict:
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE__ )
def _lowerCamelCase ( self ) -> Dict:
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE__ )
def _lowerCamelCase ( self ) -> int:
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__ )
def _lowerCamelCase ( self ) -> Union[str, Any]:
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
def _lowerCamelCase ( self ) -> Optional[int]:
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase__ = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
@slow
def _lowerCamelCase ( self ) -> int:
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = AlbertModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@require_torch
class _A ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self ) -> int:
lowerCamelCase__ = AlbertModel.from_pretrained("albert-base-v2" )
lowerCamelCase__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowerCamelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )[0]
lowerCamelCase__ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.tensor(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
| 274 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a: Optional[int] = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a: List[Any] = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
_a: Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
 | 162 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_UpperCamelCase : Optional[Any] =pytest.mark.integration
@require_faiss
class UpperCAmelCase__ ( __snake_case ):
def A__ ( self ):
        _A : Any = Dataset.from_dict({'''filename''': ['''my_name-train''' + '''_''' + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def A__ ( self ):
import faiss
_A : Dataset = self._create_dummy_dataset()
_A : List[str] = dset.map(
            lambda ex ,i : {"vecs": i * np.ones(5 ,dtype=np.floataa )} ,with_indices=True ,keep_in_memory=True )
_A : Tuple = dset.add_faiss_index('''vecs''' ,batch_size=100 ,metric_type=faiss.METRIC_INNER_PRODUCT )
_A , _A : List[Any] = dset.get_nearest_examples('''vecs''' ,np.ones(5 ,dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] ,'''my_name-train_29''' )
dset.drop_index('''vecs''' )
def A__ ( self ):
import faiss
_A : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name='''vecs''' ,batch_size=100 ,metric_type=faiss.METRIC_INNER_PRODUCT ,)
_A , _A : Tuple = dset.get_nearest_examples('''vecs''' ,np.ones(5 ,dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] ,'''my_name-train_29''' )
def A__ ( self ):
import faiss
_A : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name='''vecs''' ,metric_type=faiss.METRIC_INNER_PRODUCT ,)
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=A__ ) as tmp_file:
dset.save_faiss_index('''vecs''' ,tmp_file.name )
dset.load_faiss_index('''vecs2''' ,tmp_file.name )
os.unlink(tmp_file.name )
_A , _A : Optional[int] = dset.get_nearest_examples('''vecs2''' ,np.ones(5 ,dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] ,'''my_name-train_29''' )
def A__ ( self ):
_A : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name='''vecs''' )
dset.drop_index('''vecs''' )
self.assertRaises(A__ ,partial(dset.get_nearest_examples ,'''vecs2''' ,np.ones(5 ,dtype=np.floataa ) ) )
def A__ ( self ):
from elasticsearch import Elasticsearch
_A : Dataset = self._create_dummy_dataset()
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
_A : str = {'''acknowledged''': True}
mocked_bulk.return_value([(True, None)] * 30 )
_A : Optional[Any] = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 29}]}}
_A : Optional[Any] = Elasticsearch()
dset.add_elasticsearch_index('''filename''' ,es_client=A__ )
_A , _A : Tuple = dset.get_nearest_examples('''filename''' ,'''my_name-train_29''' )
self.assertEqual(examples['''filename'''][0] ,'''my_name-train_29''' )
@require_faiss
class UpperCAmelCase__ ( __snake_case ):
def A__ ( self ):
import faiss
_A : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal ,5 )
index.add_vectors(np.zeros((5, 5) ,dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal ,10 )
# single query
_A : str = np.zeros(5 ,dtype=np.floataa )
_A : Dict = 1
_A , _A : Optional[int] = index.search(A__ )
self.assertRaises(A__ ,index.search ,query.reshape(-1 ,1 ) )
self.assertGreater(scores[0] ,0 )
self.assertEqual(indices[0] ,1 )
# batched queries
_A : Optional[Any] = np.eye(5 ,dtype=np.floataa )[::-1]
_A , _A : Dict = index.search_batch(A__ )
self.assertRaises(A__ ,index.search_batch ,queries[0] )
_A : Tuple = [scores[0] for scores in total_scores]
_A : List[str] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A__ ) ,0 )
self.assertListEqual([4, 3, 2, 1, 0] ,A__ )
def A__ ( self ):
import faiss
_A : List[Any] = FaissIndex(string_factory='''Flat''' )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index ,faiss.IndexFlat )
_A : str = FaissIndex(string_factory='''LSH''' )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index ,faiss.IndexLSH )
with self.assertRaises(A__ ):
_A : List[str] = FaissIndex(string_factory='''Flat''' ,custom_index=faiss.IndexFlat(5 ) )
def A__ ( self ):
import faiss
_A : Any = faiss.IndexFlat(5 )
_A : Any = FaissIndex(custom_index=A__ )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index ,faiss.IndexFlat )
def A__ ( self ):
import faiss
_A : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=A__ ) as tmp_file:
index.save(tmp_file.name )
_A : str = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
_A : Tuple = np.zeros(5 ,dtype=np.floataa )
_A : Tuple = 1
_A , _A : Union[str, Any] = index.search(A__ )
self.assertGreater(scores[0] ,0 )
self.assertEqual(indices[0] ,1 )
@require_faiss
def a__ (__lowercase :List[Any] ) -> int:
import faiss
_A : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
_A : List[Any] = '''index.faiss'''
_A : Any = f"""mock://{index_name}"""
index.save(__lowercase , storage_options=mockfs.storage_options )
_A : Dict = FaissIndex.load(__lowercase , storage_options=mockfs.storage_options )
_A : Tuple = np.zeros(5 , dtype=np.floataa )
_A : Union[str, Any] = 1
_A , _A : Optional[int] = index.search(__lowercase )
assert scores[0] > 0
assert indices[0] == 1
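# --- Hedged reference sketch (illustration; mirrors what FaissIndex wraps) ---
# The tests above go through datasets' FaissIndex; underneath it is plain faiss.
# A minimal raw-library equivalent (requires faiss to be installed):
def _demo_raw_faiss() -> None:
    import faiss
    index = faiss.IndexFlatIP(5)  # "flat" inner-product index, dim=5
    index.add(np.eye(5, dtype=np.float32))  # five one-hot vectors
    query = np.zeros((1, 5), dtype=np.float32)
    query[0, 1] = 1.0
    scores, indices = index.search(query, 1)
    assert indices[0, 0] == 1  # nearest row is the matching basis vector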
@require_elasticsearch
class UpperCAmelCase__ ( __snake_case ):
def A__ ( self ):
from elasticsearch import Elasticsearch
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
_A : Tuple = Elasticsearch()
_A : Tuple = {'''acknowledged''': True}
_A : int = ElasticSearchIndex(es_client=A__ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['''foo''', '''bar''', '''foobar'''] )
# single query
_A : Dict = '''foo'''
_A : Optional[Any] = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
_A , _A : Optional[Any] = index.search(A__ )
self.assertEqual(scores[0] ,1 )
self.assertEqual(indices[0] ,0 )
# single query with timeout
_A : Union[str, Any] = '''foo'''
_A : Optional[int] = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
_A , _A : Optional[int] = index.search(A__ ,request_timeout=30 )
self.assertEqual(scores[0] ,1 )
self.assertEqual(indices[0] ,0 )
# batched queries
_A : Dict = ['''foo''', '''bar''', '''foobar''']
_A : Dict = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
_A , _A : Dict = index.search_batch(A__ )
_A : Union[str, Any] = [scores[0] for scores in total_scores]
_A : Tuple = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A__ ) ,0 )
self.assertListEqual([1, 1, 1] ,A__ )
# batched queries with timeout
_A : Union[str, Any] = ['''foo''', '''bar''', '''foobar''']
_A : int = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
_A , _A : str = index.search_batch(A__ ,request_timeout=30 )
_A : Dict = [scores[0] for scores in total_scores]
_A : List[str] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A__ ) ,0 )
self.assertListEqual([1, 1, 1] ,A__ )
| 206 | 0 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. Compare this to taking a full-size model and reducing its layers and
# embedding dimensions to the minimum while keeping the full vocab + merges files, which
# leads to ~3MB in total for all files. That approach is implemented by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
import json
import tempfile
from pathlib import Path

from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, "w") as fp:
fp.write("\n".join(merges))
    tokenizer = FSMTTokenizer(
langs=["en", "ru"],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["ru", "en"],
src_vocab_size=10_00,
tgt_vocab_size=10_00,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
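
# Sanity-check sketch (illustrative, not part of the original script): load the
# generated tiny model back from disk and run it once.
#
#   tokenizer = FSMTTokenizer.from_pretrained(mname_tiny)
#   model = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
#   batch = tokenizer(["Making tiny model"], return_tensors="pt")
#   print(model(**batch).logits.shape)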
| 310 |
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet: UNet2DModel, scheduler: RePaintScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image

        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
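
# Illustrative usage sketch (not part of this module; the checkpoint id is the
# one commonly used in the RePaint docs and is an assumption here):
#
#   from diffusers import RePaintPipeline, RePaintScheduler
#
#   scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#   output = pipe(image=original_image, mask_image=mask_image, num_inference_steps=250)
#   output.images[0].save("inpainted.png")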
| 310 | 1 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None
    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
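
        # Note: LayoutLMv3 expects one (x0, y0, x1, y1) box per text token, with
        # coordinates normalized to a 0-1000 page scale; the two boxes above are
        # dummy values that only exercise tensor shapes.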
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 16 |
'''simple docstring'''
def mf_knapsack(i, wt, val, j):
    """Memoized (memory function) knapsack using the global table ``f``."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]
def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp
def knapsack_with_example_solution(w: int, wt: list, val: list):
    """Solve the 0/1 knapsack problem and also return one optimal subset of items."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set
def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    """Recursively reconstruct one optimal subset from a filled DP table."""
    # item i belongs to an optimal subset at capacity j exactly when
    # dp[i][j] differs from dp[i - 1][j]
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("optimal_value = ", optimal_solution)
print("An optimal subset corresponding to the optimal value", optimal_subset)
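
    # The bottom-up table in `knapsack` implements the classic 0/1 recurrence
    #   dp[i][w] = max(dp[i-1][w], val[i-1] + dp[i-1][w - wt[i-1]])  if wt[i-1] <= w
    #   dp[i][w] = dp[i-1][w]                                        otherwise
    # in O(n * w) time and space. One more hand-checked case (values chosen for
    # this note, not from the original file): pick items of weight 4 and 3.
    assert knapsack(10, [5, 4, 6, 3], [10, 40, 30, 50], 4)[0] == 90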
| 400 | 0 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 123 |
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
_DESCRIPTION = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only works with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identify the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
_KWARGS_DESCRIPTION = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
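
# Note: the CoNLL score computed above is the plain average of the MUC, B-cubed
# and CEAFe F1 values, reported on a 0-100 scale; e.g. F1 values of 0.6, 0.7
# and 0.8 yield a conll_score of 70.0.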
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
| 123 | 1 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    new_state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformers implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        new_state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=new_state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
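    # Example invocation (the repo id comes from the help text above; the
    # script path and output directory are illustrative):
    #
    #   python <path-to-this-script> \
    #       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
    #       --pytorch_dump_folder_path ./roberta-prelayernorm-dump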
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 108 |
def is_power_of_two(number: int) -> bool:
    """Return True if this number is a power of 2 (the bit trick also maps 0 to True)."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
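
    # Quick illustrative checks: a power of two has exactly one bit set, so
    # n & (n - 1) clears it and yields 0.
    assert is_power_of_two(1) and is_power_of_two(64)
    assert not is_power_of_two(6)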
| 279 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")
class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None

        self.N: int = len(arr)
        # the tree lives in an implicit array of size 2 * N; leaves occupy N..2N-1
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N

        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        """Test all possible segments."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
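
    # Illustrative extra query: both `update` and `query` run in O(log N) on
    # this iterative segment tree. After the updates above, the minimum over
    # indices 1..9 of the mutated array:
    print("min over [1, 9]:", min_segment_tree.query(1, 9))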
| 718 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)
def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
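
# Worked example (illustrative strings): "The cat sat" vs "cat sat down".
# After normalization the prediction tokens are ["cat", "sat"] (the article is
# removed) and the reference tokens are ["cat", "sat", "down"], so the overlap
# is 2, precision = 2/2 = 1.0, recall = 2/3, and
# f1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.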
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
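
# Illustrative use (the flag names below mirror typical seq2seq training
# hyperparameters and are assumptions, not taken from this file):
#
#   extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
#   hparams, config = set_extra_model_params(extra_model_params, hparams, config)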
| 219 | 0 |
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """The resonant frequency of an LC circuit is f0 = 1 / (2 * pi * sqrt(L * C))."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
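
    # Illustrative worked example: L = 10 mH and C = 5 uF give
    # f0 = 1 / (2 * pi * sqrt(10e-3 * 5e-6)) ~= 711.76 Hz
    print(resonant_frequency(10e-3, 5e-6))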
| 0 |
from ..utils import DummyObject, requires_backends
# NOTE: the original Flax dummy class names could not be recovered from this
# dump, so the placeholder name is kept for every class below; the standard
# DummyObject skeleton (``_backends`` plus ``from_config``/``from_pretrained``)
# is assumed.
class _SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class _SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class _SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class _SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class _SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class _SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class _SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class _SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class _SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class _SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class _SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class _SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class _SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
| 256 | 0 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """Convert the original mLUKE weights into a LukeForMaskedLM checkpoint and verify the outputs."""
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism:
    # word-to-entity, entity-to-word and entity-to-entity queries all start
    # from the pretrained word-to-word query weights.
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    """Read the original JSON-lines entity vocabulary into a `{"language:entity" -> id}` mapping."""
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
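
# A sketch of invoking the converter programmatically rather than through
# argparse. All paths are hypothetical placeholders for an actual mLUKE
# download, so this is left commented out:
#
# convert_luke_checkpoint(
#     checkpoint_path="mluke_base/pytorch_model.bin",
#     metadata_path="mluke_base/metadata.json",
#     entity_vocab_path="mluke_base/entity_vocab.jsonl",
#     pytorch_dump_folder_path="converted_mluke_base",
#     model_size="base",
# )
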
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory used by argparse to build a ConvertCommand from the parsed CLI arguments."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command and its arguments on the root transformers-cli parser."""
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
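
# A short sketch of what the `_LazyModule` indirection above buys: importing
# the package is cheap, and the torch-backed modeling submodule is only pulled
# in when one of its attributes is first resolved:
#
#   import transformers                    # fast: only the import structure is registered
#   model_cls = transformers.AltCLIPModel  # attribute access imports modeling_altclip now
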
"""Packaged datasets builder that reads pickled pandas DataFrames."""
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
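
# A minimal end-to-end sketch of this builder (the file name is hypothetical):
# pickle a DataFrame, then read it back through the "pandas" packaged loader,
# which routes each pickle file through `pd.read_pickle` as above.
#
#   import pandas as pd
#   from datasets import load_dataset
#
#   pd.DataFrame({"a": [1, 2], "b": ["x", "y"]}).to_pickle("train.pkl")
#   ds = load_dataset("pandas", data_files={"train": "train.pkl"})
#   print(ds["train"][0])  # {'a': 1, 'b': 'x'}
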
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Given an 'isbn/xxxxxxxxxx' style olid, fetch the corresponding JSON record from Open Library."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary as a Python dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
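
# An illustrative sketch of the summary shape returned for the default olid
# above (values are fetched live from Open Library and abbreviated here, so
# treat them as an assumption rather than a fixture):
#
#   {"Title": "Matilda", "Authors": "Roald Dahl", "First sentence": "...", ...}
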
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException
            print(f"Sorry, there are no results for ISBN: {isbn}.")


from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict


@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
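
# A compact sketch of how the question encoder exercised above is used in
# practice: a question is embedded into a single 768-d vector that is compared
# to passage vectors by dot product. The checkpoint name follows the test;
# treat the exact call pattern as an assumption:
#
#   from transformers import DPRQuestionEncoderTokenizer
#
#   name = "facebook/dpr-question_encoder-single-nq-base"
#   tok = DPRQuestionEncoderTokenizer.from_pretrained(name)
#   enc = TFDPRQuestionEncoder.from_pretrained(name)
#   emb = enc(**tok("hello, is my dog cute?", return_tensors="tf")).pooler_output  # (1, 768)
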
"""Script to close or flag stale issues on the huggingface/accelerate repository."""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
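
# A minimal sketch of running this script by hand; in CI the token is injected
# by the workflow, and the file path shown is an assumption about the repo layout:
#
#   GITHUB_TOKEN=<personal-access-token> python utils/stale.py
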
"""Lazy import structure for the Audio Spectrogram Transformer (AST) model."""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
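
# A small usage sketch of the surface exported above. The checkpoint id is the
# commonly published AudioSet-finetuned AST model and is an assumption here:
#
#   from transformers import ASTFeatureExtractor, ASTForAudioClassification
#
#   name = "MIT/ast-finetuned-audioset-10-10-0.4593"
#   extractor = ASTFeatureExtractor.from_pretrained(name)
#   model = ASTForAudioClassification.from_pretrained(name)
#   inputs = extractor([0.0] * 16000, sampling_rate=16000, return_tensors="pt")
#   logits = model(**inputs).logits  # one score per AudioSet class
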
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
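
# A condensed sketch of the pipeline call pattern these integration tests
# exercise: CycleDiffusion edits a source image toward a target prompt while a
# source prompt anchors the original content (model id as used above, other
# names hypothetical):
#
#   scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
#   pipe = CycleDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler)
#   out = pipe(prompt="A blue colored car", source_prompt="A black colored car",
#              image=init_image, strength=0.85, guidance_scale=3, source_guidance_scale=1)
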
def find_min(arr) -> int:
    """Return the minimum possible difference between the sums of two subsets that partition `arr`."""
    n = len(arr)
    s = sum(arr)

    # dp[i][j] is True when some subset of the first i elements sums to exactly j
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    for i in range(1, n + 1):
        dp[i][0] = True

    for i in range(1, s + 1):
        dp[0][i] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # a subset of the first i elements reaches sum j if the first i - 1 already do...
            dp[i][j] = dp[i - 1][j]
            # ...or if adding arr[i - 1] completes the sum
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    # the best split puts as close to half of the total as possible into one subset
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
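
if __name__ == "__main__":
    # Small usage sketch: [1, 6, 11, 5] splits into {1, 5, 6} and {11},
    # so the minimum achievable difference of subset sums is |12 - 11| = 1.
    print(find_min([1, 6, 11, 5]))  # 1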
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
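
# A compact sketch of the inference path the integration test verifies. The
# checkpoint id is whatever TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] resolves
# to ("facebook/regnet-y-040" at the time of writing; treat it as an assumption):
#
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   logits = model(**processor(images=prepare_img(), return_tensors="tf")).logits  # (1, 1000)
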
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
'''simple docstring'''
a__ = 42
a__ = 42
a__ = 42
@dataclass
class UpperCAmelCase_ :
'''simple docstring'''
a__ = 42
a__ = 42
a__ = None
a__ = None
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = """train"""
a__ = """dev"""
a__ = """test"""
class UpperCAmelCase_ :
'''simple docstring'''
@staticmethod
def _lowercase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[Split, str] ) -> List[InputExample]:
"""simple docstring"""
raise NotImplementedError
@staticmethod
def _lowercase ( UpperCamelCase__ : str ) -> List[str]:
"""simple docstring"""
raise NotImplementedError
@staticmethod
def _lowercase ( UpperCamelCase__ : List[InputExample] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Tuple="[CLS]" , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : str="[SEP]" , UpperCamelCase__ : int=False , UpperCamelCase__ : Dict=False , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : str=0 , UpperCamelCase__ : Optional[Any]=-100 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : Optional[int]=True , ) -> List[InputFeatures]:
"""simple docstring"""
__magic_name__ = {label: i for i, label in enumerate(UpperCamelCase__ )}
__magic_name__ = []
for ex_index, example in enumerate(UpperCamelCase__ ):
if ex_index % 1_0000 == 0:
logger.info("""Writing example %d of %d""" , UpperCamelCase__ , len(UpperCamelCase__ ) )
__magic_name__ = []
__magic_name__ = []
for word, label in zip(example.words , example.labels ):
__magic_name__ = tokenizer.tokenize(UpperCamelCase__ )
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(UpperCamelCase__ ) > 0:
tokens.extend(UpperCamelCase__ )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(UpperCamelCase__ ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
__magic_name__ = tokenizer.num_special_tokens_to_add()
if len(UpperCamelCase__ ) > max_seq_length - special_tokens_count:
__magic_name__ = tokens[: (max_seq_length - special_tokens_count)]
__magic_name__ = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length
            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join(str(x) for x in tokens))
                logger.info("input_ids: %s", " ".join(str(x) for x in input_ids))
                logger.info("input_mask: %s", " ".join(str(x) for x in input_mask))
                logger.info("segment_ids: %s", " ".join(str(x) for x in segment_ids))
                logger.info("label_ids: %s", " ".join(str(x) for x in label_ids))
            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids))
return features
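# Illustrative usage sketch (added commentary, not part of the original module).
# A concrete task subclass would implement the two readers; the names below
# (`MyNerTask`, the label list, the tokenizer variable) are assumptions:
#
#   examples = MyNerTask.read_examples_from_file("data/", Split.train)
#   features = MyNerTask.convert_examples_to_features(
#       examples, label_list=["O", "B-ORG", "I-ORG"], max_seq_length=128,
#       tokenizer=tokenizer, pad_token_label_id=-100,
#   )
#   # Each InputFeatures holds aligned input_ids / attention_mask / label_ids.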
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train):
            # Load data features from cache or create them from the dataset file.
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)))
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id)
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id)
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )
        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
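# Sketch (added): consuming the TF dataset above with Keras. `model` and the
# batch size are assumptions; the positional argument order follows __init__:
#
#   tf_dataset = TFTokenClassificationDataset(task, data_dir, tokenizer, labels, model_type, 128)
#   ds = tf_dataset.get_dataset().batch(32)
#   model.fit(ds, epochs=3)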
| 529 | 1 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 707 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))
    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")
        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
    def test_get_activation(self):
get_activation("gelu" )
get_activation("gelu_10" )
get_activation("gelu_fast" )
get_activation("gelu_new" )
get_activation("gelu_python" )
get_activation("gelu_pytorch_tanh" )
get_activation("linear" )
get_activation("mish" )
get_activation("quick_gelu" )
get_activation("relu" )
get_activation("sigmoid" )
get_activation("silu" )
get_activation("swish" )
get_activation("tanh" )
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)
    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
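# Note (added): what the "gelu_10" test above pins down -- the activation
# behaves like gelu for small inputs but saturates at 10.0. A minimal sketch:
#
#   x = torch.tensor([5.0, 1000.0])
#   y = get_activation("gelu_10")(x)
#   assert y.max().item() == 10.0  # large inputs are clipped to 10.0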
| 25 | 0 |
'''simple docstring'''
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Order vertices by depth-first finish time (reverse postorder)."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: one DFS pass on the graph, a second on its reversal."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
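# Usage sketch (added): the second sample graph above splits into the two
# strongly connected components {0, 1, 2} and {3, 4, 5}, since each of those
# vertex sets forms a cycle.
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))
    print(strongly_connected_components(test_graph_2))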
| 358 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(self, vocab_size=None, mask_token_id=None, pad_token_id=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1026, initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type="absolute", use_cache=True, emb_layer_norm_before=None, token_dropout=False, is_folding_model=False, esmfold_config=None, vocab_list=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # Note: the original checks compared each dim against itself (always true);
        # the intended check is divisibility by the corresponding head width.
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}.")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}.")
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
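# Usage sketch (added; field names per the dataclasses above): EsmConfig
# converts plain dicts into the nested config objects, so a folding-model
# config can be built like this:
#
#   config = EsmConfig(
#       vocab_size=33,
#       is_folding_model=True,
#       esmfold_config={"trunk": {"num_blocks": 48, "sequence_state_dim": 1024}},
#   )
#   assert isinstance(config.esmfold_config.trunk, TrunkConfig)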
| 358 | 1 |
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")])
def test_split_dict_asdict_has_dataset_name(split_info):
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
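# Round-trip sketch (added): the YAML dump drops `dataset_name` and re-keys
# each SplitInfo by its split name, which is what the first test verifies:
#
#   sd = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
#   assert SplitDict._from_yaml_list(sd._to_yaml_list())["train"].num_examples == 42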
| 711 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    r"""Constructs a SpeechT5 processor which wraps a feature extractor and a tokenizer into a single processor."""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?")
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?")
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)
        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.")
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily widen feature_size so the extractor pads log-mel targets.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
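# Usage sketch (added; the checkpoint name and `waveform` variable are
# assumptions): preparing TTS training inputs, where `text` goes through the
# tokenizer and `audio_target` through the feature extractor:
#
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   batch = processor(text="Hello world", audio_target=waveform, sampling_rate=16000)
#   # batch contains input_ids, labels (log-mel targets) and decoder_attention_mask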
| 560 | 0 |
'''simple docstring'''
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
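# Note (added commentary): every optional-dependency block in this module
# follows the same shape, so a missing extra degrades to dummy placeholder
# objects instead of raising ImportError at `import diffusers` time. The
# scipy block above is the minimal instance of the pattern:
#
#   try:
#       if not (is_torch_available() and is_scipy_available()):
#           raise OptionalDependencyNotAvailable()
#   except OptionalDependencyNotAvailable:
#       from .utils.dummy_torch_and_scipy_objects import *  # noqa F403
#   else:
#       from .schedulers import LMSDiscreteScheduler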
 | 284 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
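# Worked example (added): the property above is the feature extractor's total
# downsampling factor, i.e. the product of `conv_stride`. For the default
# strides (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) that is 5 * 2**6 == 320,
# meaning one output frame per 320 input samples (20 ms at 16 kHz):
#
#   import functools, operator
#   assert functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1) == 320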
| 363 | 0 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = 42
@flax_register_to_config
class lowerCamelCase_ ( nn.Module ,_A ,_A ):
'''simple docstring'''
a__ = 32
a__ = 4
a__ = 4
a__ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
a__ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
a__ = False
a__ = (320, 640, 1280, 1280)
a__ = 2
a__ = 8
a__ = None
a__ = 1280
a__ = 0.0
a__ = False
a__ = jnp.floataa
a__ = True
a__ = 0
a__ = False
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : jax.random.KeyArray ) -> FrozenDict:
# init input tensors
A : List[Any] = (1, self.in_channels, self.sample_size, self.sample_size)
A : Dict = jnp.zeros(__lowerCamelCase , dtype=jnp.floataa )
A : Any = jnp.ones((1,) , dtype=jnp.intaa )
A : str = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
A , A : Tuple = jax.random.split(__lowerCamelCase )
A : str = {"params": params_rng, "dropout": dropout_rng}
return self.init(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )["params"]
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]:
A : Any = self.block_out_channels
A : Any = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
A : Union[str, Any] = self.num_attention_heads or self.attention_head_dim
# input
A : Union[str, Any] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
A : Union[str, Any] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
A : int = FlaxTimestepEmbedding(__lowerCamelCase , dtype=self.dtype )
A : List[Any] = self.only_cross_attention
if isinstance(__lowerCamelCase , __lowerCamelCase ):
A : Optional[int] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
A : Optional[int] = (num_attention_heads,) * len(self.down_block_types )
# down
A : List[str] = []
A : Union[str, Any] = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
A : List[str] = output_channel
A : Union[str, Any] = block_out_channels[i]
A : Any = i == len(__lowerCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
A : Any = FlaxCrossAttnDownBlockaD(
in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
A : Optional[Any] = FlaxDownBlockaD(
in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__lowerCamelCase )
A : List[str] = down_blocks
# mid
A : Optional[Any] = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
A : Tuple = []
A : Tuple = list(reversed(__lowerCamelCase ) )
A : Any = list(reversed(__lowerCamelCase ) )
A : List[Any] = list(reversed(__lowerCamelCase ) )
A : Optional[int] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
A : List[str] = output_channel
A : Optional[int] = reversed_block_out_channels[i]
A : Any = reversed_block_out_channels[min(i + 1 , len(__lowerCamelCase ) - 1 )]
A : Optional[Any] = i == len(__lowerCamelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
A : Optional[int] = FlaxCrossAttnUpBlockaD(
in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , prev_output_channel=__lowerCamelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
A : Optional[int] = FlaxUpBlockaD(
in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , prev_output_channel=__lowerCamelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(__lowerCamelCase )
A : Optional[Any] = output_channel
A : Union[str, Any] = up_blocks
# out
A : Tuple = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
A : Optional[int] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int=None , __lowerCamelCase : Any=None , __lowerCamelCase : bool = True , __lowerCamelCase : bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
# 1. time
if not isinstance(__lowerCamelCase , jnp.ndarray ):
A : List[str] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
A : List[str] = timesteps.astype(dtype=jnp.floataa )
A : Union[str, Any] = jnp.expand_dims(__lowerCamelCase , 0 )
A : Optional[int] = self.time_proj(__lowerCamelCase )
A : Dict = self.time_embedding(__lowerCamelCase )
# 2. pre-process
A : str = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) )
A : List[str] = self.conv_in(__lowerCamelCase )
# 3. down
A : int = (sample,)
for down_block in self.down_blocks:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
A , A : Dict = down_block(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , deterministic=not train )
else:
A , A : List[Any] = down_block(__lowerCamelCase , __lowerCamelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
A : Tuple = ()
for down_block_res_sample, down_block_additional_residual in zip(
__lowerCamelCase , __lowerCamelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
A : Dict = new_down_block_res_samples
# 4. mid
A : Any = self.mid_block(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
A : Any = down_block_res_samples[-(self.layers_per_block + 1) :]
A : List[Any] = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(__lowerCamelCase , __lowerCamelCase ):
A : int = up_block(
__lowerCamelCase , temb=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , res_hidden_states_tuple=__lowerCamelCase , deterministic=not train , )
else:
A : int = up_block(__lowerCamelCase , temb=__lowerCamelCase , res_hidden_states_tuple=__lowerCamelCase , deterministic=not train )
# 6. post-process
A : Union[str, Any] = self.conv_norm_out(__lowerCamelCase )
A : str = nn.silu(__lowerCamelCase )
A : str = self.conv_out(__lowerCamelCase )
A : List[str] = jnp.transpose(__lowerCamelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
        return FlaxUNetaDConditionOutput(sample=sample)
 | 17 |
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
_KWARGS_DESCRIPTION = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
_CITATION = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn"):
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division)
        return {"recall": float(score) if score.size == 1 else score}
 | 17 | 1 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
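# Note (added): `Dataset.from_list` and `Dataset.from_dict` produce equivalent
# datasets for rectangular data, which the equivalence test above relies on:
#
#   rows = [{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}]
#   cols = {"col_1": [3, 2], "col_2": ["a", "b"]}
#   assert Dataset.from_list(rows).to_dict() == Dataset.from_dict(cols).to_dict()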
| 60 |
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
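# Worked example (added): the integral above is the Gamma function, and
# Gamma(n) == (n - 1)! for positive integers, so gamma(5) should be close
# to 4! == 24:
#
#   assert abs(gamma(5) - 24.0) < 1e-6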
| 570 | 0 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params ( module ):
    '''simple docstring'''
    for param in module.parameters():
        param.requires_grad = False
def get_device ( ):
    '''simple docstring'''
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device
def show_pil ( img ):
    '''simple docstring'''
    fig = plt.imshow(img )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp ( ):
    '''simple docstring'''
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S" )
    return timestamp
| 710 |
'''simple docstring'''
import cva
import numpy as np
class HarrisCorner:
    '''simple docstring'''
    def __init__( self , k , window_size ):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value" )
    def __str__( self ):
        return str(self.k )
    def detect( self , img_path ):
        img = cva.imread(img_path , 0 )
        h , w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img , cva.COLOR_GRAY2RGB )
        dy , dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img , _ = edge_detect.detect("""path_to_image""")
    cva.imwrite("""detect.png""", color_img)
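# Added, hedged illustration of the response formula used in detect(): with the
# window sums M = [[wxx, wxy], [wxy, wyy]], the score is r = det(M) - k * trace(M)^2.
# Made-up numbers: wxx = wyy = 9.0, wxy = 1.0 gives det = 80, trace = 18, and
# r = 80 - 0.04 * 324 = 67.04 -- a large positive r, i.e. corner-like.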
| 368 | 0 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class lowerCamelCase__ :
"""simple docstring"""
@staticmethod
        def snake_case__ ( *args , **kwargs ):
'''simple docstring'''
pass
def UpperCamelCase_( _A :Dict )-> Union[str, Any]:
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline( self , model , tokenizer , processor ):
        '''simple docstring'''
        dqa_pipeline = pipeline(
            "document-question-answering" , model=model , tokenizer=tokenizer , image_processor=processor )
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , "" ) ) )
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image ),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test( self , dqa_pipeline , examples ):
        '''simple docstring'''
        outputs = dqa_pipeline(examples , top_k=2 )
        self.assertEqual(
            outputs , [
                [
                    {"score": ANY(float ), "answer": ANY(str ), "start": ANY(int ), "end": ANY(int )},
                    {"score": ANY(float ), "answer": ANY(str ), "start": ANY(int ), "end": ANY(int )},
                ]
            ]
            * 3 , )
@require_torch
@require_detectrona
@require_pytesseract
    def test_small_model_pt( self ):
        '''simple docstring'''
        dqa_pipeline = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" )
        image = INVOICE_URL
        question = "How many cats are there?"
        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(nested_simplify(outputs , decimals=4 ) , expected_output )
        outputs = dqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(nested_simplify(outputs , decimals=4 ) , expected_output )
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(outputs , [] )
        # We can optionally pass the words and bounding boxes directly
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image , question=question , words=words , boxes=boxes , top_k=2 )
        self.assertEqual(outputs , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
    def test_large_model_pt( self ):
        '''simple docstring'''
        dqa_pipeline = pipeline(
            "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        outputs = dqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
    def test_large_model_pt_chunk( self ):
        '''simple docstring'''
        dqa_pipeline = pipeline(
            "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        outputs = dqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm( self ):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=True )
        dqa_pipeline = pipeline(
            "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=tokenizer , revision="3dc6de3" , )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ] , )
        outputs = dqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ] , )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2 , )
        word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , "" ) ) )
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ] , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk( self ):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=True )
        dqa_pipeline = pipeline(
            "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=tokenizer , revision="3dc6de3" , max_seq_len=50 , )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2 , )
        word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , "" ) ) )
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ] , )
@slow
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
UpperCamelCase__ = INVOICE_URL
UpperCamelCase__ = "What is the invoice number?"
UpperCamelCase__ = dqa_pipeline(image=snake_case , question=snake_case , top_k=2 )
self.assertEqual(nested_simplify(snake_case , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
    def test_small_model_tf( self ):
        '''simple docstring'''
        pass
| 551 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def UpperCamelCase_( file , sock )-> None:
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _ : next(f )
    # ===== invoke =====
    send_file(filename="mytext.txt" , testing=True )
    # ===== verification =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
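# Added note: @patch decorators apply bottom-up, so the innermost patch
# ("builtins.open") is injected as the first parameter (file) and
# "socket.socket" as the second (sock).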
| 551 | 1 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def A(pkg: str , hint: str=None ):
    require_version(deps[pkg] , hint )
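# Hedged usage sketch (added): the helper lets callers defer a pinned-version
# check until a dependency is actually needed, e.g.
#   A("tokenizers")                               # check against the pin in deps
#   A("accelerate" , "pip install -U accelerate") # with a custom hint message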
| 715 |
def sum_of_digits(n: int ):
    n = abs(n )
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n: int ):
    n = abs(n )
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10 )
def sum_of_digits_compact(n: int ):
    return sum(int(c ) for c in str(abs(n ) ) )
def benchmark():
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable , value: int ) -> None:
        call = F"{func.__name__}({value})"
        timing = timeit(F"__main__.{call}" , setup="import __main__" )
        print(F"{call:56} = {func(value )} -- {timing:.4f} seconds" )
    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
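    # Hedged sanity check (added): all three implementations must agree,
    # e.g. the digits of 12345 sum to 15.
    assert sum_of_digits(12_345 ) == sum_of_digits_recursion(12_345 ) == sum_of_digits_compact(12_345 ) == 15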
| 226 | 0 |
def counting_sort ( collection ):
    '''simple docstring'''
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1 , counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0 , coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string ( string ):
    '''simple docstring'''
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
_UpperCAmelCase = input("""Enter numbers separated by a comma:\n""").strip()
_UpperCAmelCase = [int(item) for item in user_input.split(""",""")]
print(counting_sort(unsorted))
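    # Hedged example (added): counting sort runs in O(n + k) and is stable,
    # so a fixed list sorts as expected without the interactive input above.
    assert counting_sort([0, 5, 3, 2, 2] ) == [0, 2, 2, 3, 5]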
| 558 | import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = """examples/"""
REPLACE_PATTERNS = {
    """examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
    """init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
    """setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
    """doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
REPLACE_FILES = {
    """init""": """src/transformers/__init__.py""",
    """setup""": """setup.py""",
}
README_FILE = """README.md"""
def update_version_in_file ( fname ,version ,pattern ):
    '''simple docstring'''
    with open(fname ,'r' ,encoding='utf-8' ,newline='\n' ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('VERSION' ,version )
    code = re_pattern.sub(replace ,code )
    with open(fname ,'w' ,encoding='utf-8' ,newline='\n' ) as f:
        f.write(code )
def update_version_in_examples ( version ):
    '''simple docstring'''
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('research_projects' )
        if "legacy" in directories:
            directories.remove('legacy' )
        for fname in fnames:
            if fname.endswith('.py' ):
                update_version_in_file(os.path.join(folder ,fname ) ,version ,pattern='examples' )
def global_version_update ( version ,patch=False ):
    '''simple docstring'''
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname ,version ,pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list ( ):
    '''simple docstring'''
    _start_prompt = '🤗 Transformers currently provides the following architectures'
    _end_prompt = '1. Want to contribute a new model?'
    with open(README_FILE ,'r' ,encoding='utf-8' ,newline='\n' ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('1.' ):
            lines[index] = lines[index].replace(
                'https://huggingface.co/docs/transformers/main/model_doc' ,'https://huggingface.co/docs/transformers/model_doc' ,)
        index += 1
    with open(README_FILE ,'w' ,encoding='utf-8' ,newline='\n' ) as f:
        f.writelines(lines )
def get_version ( ):
    '''simple docstring'''
    with open(REPLACE_FILES['init'] ,'r' ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['init'][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work ( patch=False ):
    '''simple docstring'''
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        default_version = f'''{default_version.major}.{default_version.minor + 1}.0'''
    # Now let's ask nicely if that's the right one.
    version = input(f'''Which version are you releasing? [{default_version}]''' )
    if len(version ) == 0:
        version = default_version
    print(f'''Updating version to {version}.''' )
    global_version_update(version ,patch=patch )
    if not patch:
        print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
        clean_main_ref_in_model_list()
def post_release_work ( ):
    '''simple docstring'''
    current_version = get_version()
    dev_version = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f'''Which version are we developing now? [{dev_version}]''' )
    if len(version ) == 0:
        version = dev_version
    print(f'''Updating version to {version}.''' )
    global_version_update(version )
    print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
    clean_main_ref_in_model_list()
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
_UpperCAmelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
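# Hedged usage note (added, assuming this script is saved as release.py):
#   python release.py                 # pre-release: bump to the next minor X.(Y+1).0
#   python release.py --patch         # pre-release: bump to a patch X.Y.(Z+1)
#   python release.py --post_release  # reopen the dev cycle as X.(Y+1).0.dev0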
| 558 | 1 |
def decimal_to_fraction( decimal ):
    """simple docstring"""
    try:
        decimal = float(decimal )
    except ValueError:
        raise ValueError('''Please enter a valid number''' )
    fractional_part = decimal - int(decimal )
    if fractional_part == 0:
        return int(decimal ), 1
    else:
        number_of_frac_digits = len(str(decimal ).split('''.''' )[1] )
        numerator = int(decimal * (10**number_of_frac_digits) )
        denominator = 10**number_of_frac_digits
        divisor , dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend , divisor = divisor, remainder
        numerator , denominator = numerator / divisor, denominator / divisor
        return int(numerator ), int(denominator )
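# Hedged note (added): the division loop above is Euclid's algorithm, so the
# returned pair is fully reduced -- e.g. decimal_to_fraction(6.25) == (25, 4).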
if __name__ == "__main__":
print(F"{decimal_to_fraction(2) = }")
print(F"{decimal_to_fraction(89.0) = }")
print(F"{decimal_to_fraction('67') = }")
print(F"{decimal_to_fraction('45.0') = }")
print(F"{decimal_to_fraction(1.5) = }")
print(F"{decimal_to_fraction('6.25') = }")
print(F"{decimal_to_fraction('78td') = }")
| 707 |
def counting_sort( collection ):
    """simple docstring"""
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1 , counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0 , coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string( string ):
    """simple docstring"""
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"
a__ : Optional[int] = input('''Enter numbers separated by a comma:\n''').strip()
a__ : str = [int(item) for item in user_input.split(''',''')]
print(counting_sort(unsorted))
| 333 | 0 |
"""simple docstring"""
import math
def check_partition_perfect ( positive_integer ) -> bool:
    '''simple docstring'''
    exponent = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )
def solution ( max_proportion = 1 / 1_2345 ) -> int:
    '''simple docstring'''
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate ):
            partition_candidate = int(partition_candidate )
            total_partitions += 1
            if check_partition_perfect(partition_candidate ):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(integer )
        integer += 1
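# Hedged note (added): partition_candidate = (n^2 - 1) / 4 is integral only for odd n,
# and check_partition_perfect accepts it exactly when sqrt(4k + 1) / 2 + 1 / 2 is a
# power of two, which the log2-based integrality test above encodes.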
if __name__ == "__main__":
print(f'''{solution() = }''')
| 426 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class a ( PretrainedConfig ):
    """simple docstring"""
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__( self , vocab_size=32000 , hidden_size=4096 , intermediate_size=11008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                F"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get("type" , None )
        rope_scaling_factor = self.rope_scaling.get("factor" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
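    # Hedged example (added): rope_scaling={"type": "linear", "factor": 2.0} passes
    # this validation, while an unknown type or a factor <= 1.0 raises the
    # ValueErrors above.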
| 426 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node :
    def __init__( self , value ):
        """simple docstring"""
        self.value = value
        self.left = None
        self.right = None
class BinaryTreeNodeSum :
    def __init__( self , tree ):
        """simple docstring"""
        self.tree = tree
    def depth_first_search( self , node ):
        """simple docstring"""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )
    def __iter__( self ):
        """simple docstring"""
        yield self.depth_first_search(self.tree )
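# Hedged usage sketch (added): iterating yields the sum of all node values once.
#   root = Node(10); root.left = Node(5); root.right = Node(-3)
#   assert next(iter(BinaryTreeNodeSum(root))) == 12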
if __name__ == "__main__":
import doctest
doctest.testmod()
| 197 | """simple docstring"""
def UpperCAmelCase__ ( word :str , max_width :int ) -> list:
    '''simple docstring'''
    words = word.split()
    def justify(line :list , width :int , max_width :int ) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line )
        if len(line ) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations ):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words ):
                # add the word
                aligned_words_list.append(line[i] )
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * """ """ )
            # just add the last word to the sentence
            aligned_words_list.append(line[-1] )
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list )
    answer = []
    line = []
    width = 0
    for word in words:
        if width + len(word ) + len(line ) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word )
            width += len(word )
        else:
            # justify the line and add it to result
            answer.append(justify(line , width , max_width ) )
            # reset new line and new width
            line , width = [word], len(word )
    remaining_spaces = max_width - width - len(line )
    answer.append(""" """.join(line ) + (remaining_spaces + 1) * """ """ )
    return answer
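# Hedged example (added): with max_width=16, the classic input
#   UpperCAmelCase__("This is an example of text justification.", 16)
# returns lines padded to exactly 16 characters:
#   ['This    is    an', 'example  of text', 'justification.  ']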
if __name__ == "__main__":
from doctest import testmod
testmod()
| 197 | 1 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks ( args ) -> Any:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings ( student , args ) -> Any:
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings ( student , args ) -> Optional[Any]:
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main ( ) -> List[Any]:
    parser = argparse.ArgumentParser(description='Training' )
    parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' )
    parser.add_argument(
        '--dump_path' , type=str , required=True , help='The output directory (log, checkpoints, parameters, etc.)' )
    parser.add_argument(
        '--data_file' , type=str , required=True , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , )
    parser.add_argument(
        '--student_type' , type=str , choices=['distilbert', 'roberta', 'gpt2'] , required=True , help='The student type (DistilBERT, RoBERTa).' , )
    parser.add_argument('--student_config' , type=str , required=True , help='Path to the student configuration.' )
    parser.add_argument(
        '--student_pretrained_weights' , default=None , type=str , help='Load student initialization checkpoint.' )
    parser.add_argument(
        '--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=True , help='Teacher type (BERT, RoBERTa).' )
    parser.add_argument('--teacher_name' , type=str , required=True , help='The teacher model.' )
    parser.add_argument('--temperature' , default=2.0 , type=float , help='Temperature for the softmax temperature.' )
    parser.add_argument(
        '--alpha_ce' , default=0.5 , type=float , help='Linear weight for the distillation loss. Must be >=0.' )
    parser.add_argument(
        '--alpha_mlm' , default=0.0 , type=float , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , )
    parser.add_argument('--alpha_clm' , default=0.5 , type=float , help='Linear weight for the CLM loss. Must be >=0.' )
    parser.add_argument('--alpha_mse' , default=0.0 , type=float , help='Linear weight of the MSE loss. Must be >=0.' )
    parser.add_argument(
        '--alpha_cos' , default=0.0 , type=float , help='Linear weight of the cosine embedding loss. Must be >=0.' )
    parser.add_argument(
        '--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' )
    parser.add_argument(
        '--mlm_mask_prop' , default=0.15 , type=float , help='Proportion of tokens for which we need to make a prediction.' , )
    parser.add_argument('--word_mask' , default=0.8 , type=float , help='Proportion of tokens to mask out.' )
    parser.add_argument('--word_keep' , default=0.1 , type=float , help='Proportion of tokens to keep.' )
    parser.add_argument('--word_rand' , default=0.1 , type=float , help='Proportion of tokens to randomly replace.' )
    parser.add_argument(
        '--mlm_smoothing' , default=0.7 , type=float , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , )
    parser.add_argument('--token_counts' , type=str , help='The token counts in the data_file for MLM.' )
    parser.add_argument(
        '--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , )
    parser.add_argument(
        '--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , )
    parser.add_argument(
        '--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , )
    parser.add_argument('--n_epoch' , type=int , default=3 , help='Number of pass on the whole dataset.' )
    parser.add_argument('--batch_size' , type=int , default=5 , help='Batch size (for each process).' )
    parser.add_argument(
        '--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , )
    parser.add_argument(
        '--gradient_accumulation_steps' , type=int , default=50 , help='Gradient accumulation for larger training batches.' , )
    parser.add_argument('--warmup_prop' , default=0.05 , type=float , help='Linear warmup proportion.' )
    parser.add_argument('--weight_decay' , default=0.0 , type=float , help='Weight decay if we apply some.' )
    parser.add_argument('--learning_rate' , default=5e-4 , type=float , help='The initial learning rate for Adam.' )
    parser.add_argument('--adam_epsilon' , default=1e-6 , type=float , help='Epsilon for Adam optimizer.' )
    parser.add_argument('--max_grad_norm' , default=5.0 , type=float , help='Max gradient norm.' )
    parser.add_argument('--initializer_range' , default=0.02 , type=float , help='Random initialization range.' )
    parser.add_argument(
        '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
    parser.add_argument(
        '--fp16_opt_level' , type=str , default='O1' , help=(
            'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
            'See details at https://nvidia.github.io/apex/amp.html'
        ) , )
    parser.add_argument('--n_gpu' , type=int , default=1 , help='Number of GPUs in the node.' )
    parser.add_argument('--local_rank' , type=int , default=-1 , help='Distributed training - Local rank' )
    parser.add_argument('--seed' , type=int , default=56 , help='Random seed' )
    parser.add_argument('--log_interval' , type=int , default=500 , help='Tensorboard logging interval.' )
    parser.add_argument('--checkpoint_interval' , type=int , default=4000 , help='Checkpoint interval.' )
    args = parser.parse_args()
    sanity_checks(args )
    # ARGS #
    init_gpu_params(args )
    set_seed(args )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    ' it. Use `--force` if you want to overwrite it.' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f:
            json.dump(vars(args ) , f , indent=4 )
git_log(args.dump_path )
    student_config_class , student_model_class , _ = MODEL_CLASSES[args.student_type]
    teacher_config_class , teacher_model_class , teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name )
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol )
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(F'''Special tokens {special_tok_ids}''' )
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , 'rb' ) as fp:
        data = pickle.load(fp )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , 'rb' ) as fp:
            counts = pickle.load(fp )
        token_probs = np.maximum(counts , 1 ) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0 # do not predict special tokens
        token_probs = torch.from_numpy(token_probs )
else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args , data=data )
logger.info('Data loader created.' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config )
    stu_architecture_config.output_hidden_states = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
        student = student_model_class.from_pretrained(args.student_pretrained_weights , config=stu_architecture_config )
else:
        student = student_model_class(stu_architecture_config )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('Student loaded.' )
# TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=True )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
        freeze_pos_embeddings(student , args )
if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student , args )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args , dataset=train_lm_seq_dataset , token_probs=token_probs , student=student , teacher=teacher )
distiller.train()
logger.info('Let\'s go get some drinks.' )
if __name__ == "__main__":
main()
| 549 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """\u0120""",
            """\u0120l""",
            """\u0120n""",
            """\u0120lo""",
            """\u0120low""",
            """er""",
            """\u0120lowest""",
            """\u0120newer""",
            """\u0120wider""",
            """[UNK]""",
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """[UNK]"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        input_text = """lower newer"""
        output_text = """lower newer"""
        return input_text, output_text
    def test_full_tokenizer(self ):
        tokenizer = self.get_tokenizer()
        text = """lower newer"""
        bpe_tokens = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_token_type_ids(self ):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("""Hello""" , """World""" )
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["""token_type_ids"""] , expected_token_type_ids )
@slow
    def test_sequence_builders(self ):
        tokenizer = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            """sequence builders""" , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            """sequence builders""" , """multi-sequence build""" , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration(self ):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class )
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
            sequences = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
            encoding = tokenizer(sequences , padding=True )
            decoded_sequences = [tokenizer.decode(seq , skip_special_tokens=True ) for seq in encoding["""input_ids"""]]
# fmt: off
            expected_encoding = {
"""input_ids""": [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
            self.assertDictEqual(encoding.data , expected_encoding )
            for expected, decoded in zip(expected_decoded_sequence , decoded_sequences ):
                self.assertEqual(expected , decoded )
| 157 | 0 |
'''simple docstring'''
def UpperCamelCase__ ( number_of_steps: int ):
    '''simple docstring'''
    assert (
        isinstance(number_of_steps , int ) and number_of_steps > 0
    ), f'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
    if number_of_steps == 1:
        return 1
    previous , current = 1, 1
    for _ in range(number_of_steps - 1 ):
        current , previous = current + previous, current
    return current
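# Hedged note (added): this is the Fibonacci recurrence ways(n) = ways(n-1) + ways(n-2)
# with ways(1) = 1 and ways(2) = 2, so e.g. UpperCamelCase__(4) == 5.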
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
Pointad = tuple[float, float, float]
Vectorad = tuple[float, float, float]
def create_vector ( end_pointa: Pointad , end_pointb: Pointad ) -> Vectorad:
    '''simple docstring'''
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)
def get_ad_vectors_cross ( ab: Vectorad , ac: Vectorad ) -> Vectorad:
    '''simple docstring'''
    x = ab[1] * ac[2] - ab[2] * ac[1] # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
    z = ab[0] * ac[1] - ab[1] * ac[0] # *k
    return (x, y, z)
def is_zero_vector ( vector: Vectorad , accuracy: int ) -> bool:
    '''simple docstring'''
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)
def are_collinear ( a: Pointad , b: Pointad , c: Pointad , accuracy: int = 10 ) -> bool:
    '''simple docstring'''
    ab = create_vector(a , b )
    ac = create_vector(a , c )
    return is_zero_vector(get_ad_vectors_cross(ab , ac ) , accuracy )
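# Hedged example (added): AB x AC vanishes for points on a common line.
#   assert are_collinear((0, 0, 0), (1, 0, 0), (2, 0, 0))
#   assert not are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0))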
| 571 | 0 |
from __future__ import annotations
from collections import namedtuple
def UpperCamelCase_ ( voltage , current , power ) -> tuple:
    result = namedtuple("result" , "name value" )
    if (voltage, current, power).count(0 ) != 1:
        raise ValueError("Only one argument must be 0" )
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system" )
    elif voltage == 0:
        return result("voltage" , power / current )
    elif current == 0:
        return result("current" , power / voltage )
    elif power == 0:
        return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
    else:
        raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
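    # Hedged example (added): 2 V at 5 A dissipates 10 W, reported as the
    # missing "power" field of the namedtuple.
    assert UpperCamelCase_(voltage=2 , current=5 , power=0 ) == ("power", 10.0)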
| 37 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string ( stringa : str ,stringa : str ) -> str | Literal[False]:
    lista = list(stringa )
    listb = list(stringa )
    count = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
            count += 1
            lista[i] = """_"""
    if count > 1:
        return False
    else:
        return "".join(lista )
def check ( binary : list[str] ) -> list[str]:
    pi = []
    while True:
        checka = ["""$"""] * len(binary )
        temp = []
        for i in range(len(binary ) ):
            for j in range(i + 1 ,len(binary ) ):
                k = compare_string(binary[i] ,binary[j] )
                if k is False:
                    checka[i] = """*"""
                    checka[j] = """*"""
                    temp.append("""X""" )
        for i in range(len(binary ) ):
            if checka[i] == "$":
                pi.append(binary[i] )
        if len(temp ) == 0:
            return pi
        binary = list(set(temp ) )
def decimal_to_binary ( no_of_variable : int ,minterms : Sequence[float] ) -> list[str]:
    temp = []
    for minterm in minterms:
        string = """"""
        for _ in range(no_of_variable ):
            string = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(string )
    return temp
def is_for_table ( stringa : str ,stringa : str ,count : int ) -> bool:
    lista = list(stringa )
    listb = list(stringa )
    count_n = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
            count_n += 1
    return count_n == count
def selection ( chart : list[list[int]] ,prime_implicants : list[str] ) -> list[str]:
    temp = []
    select = [0] * len(chart )
    for i in range(len(chart[0] ) ):
        count = 0
        rem = -1
        for j in range(len(chart ) ):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(chart ) ):
                        chart[k][j] = 0
            temp.append(prime_implicants[i] )
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart ) ):
            count_n = chart[i].count(1 )
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(chart ) ):
                    chart[j][i] = 0
def prime_implicant_chart ( prime_implicants : list[str] ,binary : list[str] ) -> list[list[int]]:
    chart = [[0 for x in range(len(binary ) )] for x in range(len(prime_implicants ) )]
    for i in range(len(prime_implicants ) ):
        count = prime_implicants[i].count("""_""" )
        for j in range(len(binary ) ):
            if is_for_table(prime_implicants[i] ,binary[j] ,count ):
                chart[i][j] = 1
    return chart
def main ( ) -> None:
    no_of_variable = int(input("""Enter the no. of variables\n""" ) )
    minterms = [
        float(x )
        for x in input(
            """Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
    ]
    binary = decimal_to_binary(no_of_variable ,minterms )
    prime_implicants = check(binary )
    print("""Prime Implicants are:""" )
    print(prime_implicants )
    chart = prime_implicant_chart(prime_implicants ,binary )
    essential_prime_implicants = selection(chart ,prime_implicants )
    print("""Essential Prime Implicants are:""" )
    print(essential_prime_implicants )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
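# Hedged walk-through (added): for f = sum m(0, 1, 3) on two variables you would
# enter "2" and "0 1 3"; decimal_to_binary maps the minterms to ["00", "01", "11"],
# check() reduces them to prime implicants, prime_implicant_chart() builds the
# coverage table, and selection() keeps the essential implicants.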
| 17 | 0 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = 10
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = [1, 2, 3, 4]
__lowerCamelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
__lowerCamelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
__lowerCamelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = """It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this."""
__lowerCamelCase, __lowerCamelCase = process_story(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [] )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = """"""
__lowerCamelCase, __lowerCamelCase = process_story(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [] )
self.assertEqual(UpperCamelCase_ , [] )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = (
"""It was the year of Our Lord one thousand seven hundred and """
"""seventy-five\n\nSpiritual revelations were conceded to England """
"""at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
)
__lowerCamelCase, __lowerCamelCase = process_story(UpperCamelCase_ )
__lowerCamelCase = [
"""It was the year of Our Lord one thousand seven hundred and seventy-five.""",
"""Spiritual revelations were conceded to England at that favoured period, as at this.""",
]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = ["""It was the best of times."""]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = torch.tensor([1, 2, 3, 4] )
__lowerCamelCase = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 0 ).numpy() , expected.numpy() )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
__lowerCamelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 23 ).numpy() , expected.numpy() )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
__lowerCamelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 1 ).numpy() , expected.numpy() )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = 1_01
__lowerCamelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] )
__lowerCamelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
__lowerCamelCase = compute_token_type_ids(UpperCamelCase_ , UpperCamelCase_ )
np.testing.assert_array_equal(UpperCamelCase_ , UpperCamelCase_ )
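# The helpers exercised above are imported from utils_summarization. Below is a
# minimal sketch of the behaviour the tests imply (hand-written here, not the
# library code): truncate_or_pad clips or right-pads a sequence to block_size,
# and build_mask marks real tokens with 1 and pad tokens with 0.
def _truncate_or_pad_sketch(sequence: list, block_size: int, pad_token_id: int) -> list:
    if len(sequence) >= block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))


def _build_mask_sketch(sequence: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    mask = torch.ones_like(sequence)
    mask[sequence == pad_token_id] = 0
    return mask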
| 80 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : int = BartphoTokenizer
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : List[str] = True
def lowerCAmelCase__ ( self: Tuple ):
super().setUp()
__lowerCamelCase = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]
__lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowerCamelCase = {"""unk_token""": """<unk>"""}
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""] )
with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(F'{token} {vocab_tokens[token]}\n' )
__lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: List[str] , **UpperCamelCase_: List[str] ):
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ):
__lowerCamelCase = """This is a là test"""
__lowerCamelCase = """This is a<unk><unk> test"""
return input_text, output_text
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map )
__lowerCamelCase = """This is a là test"""
__lowerCamelCase = """▁This ▁is ▁a ▁l à ▁t est""".split()
__lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
| 80 | 1 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/accelerate' )
    open_issues = repo.get_issues(state='open' )
for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 192 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_lowercase: List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image(image):
    warnings.warn(
        'The preprocess method is deprecated and will be removed in a future version. Please'
        ' use VaeImageProcessor.preprocess instead',
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert('L').resize((w, h), resample=PIL_INTERPOLATION['nearest']))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class lowerCamelCase__ ( UpperCAmelCase ):
    unet: UNetaDModel
    scheduler: RePaintScheduler
    def __init__(self, unet: UNetaDModel, scheduler: RePaintScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__( self : Any , lowercase__ : Union[torch.Tensor, PIL.Image.Image] , lowercase__ : Union[torch.Tensor, PIL.Image.Image] , lowercase__ : int = 2_50 , lowercase__ : float = 0.0 , lowercase__ : int = 10 , lowercase__ : int = 10 , lowercase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase__ : Optional[str] = "pil" , lowercase__ : bool = True , ):
_lowerCAmelCase = image
_lowerCAmelCase = _preprocess_image(lowercase__ )
_lowerCAmelCase = original_image.to(device=self.device , dtype=self.unet.dtype )
_lowerCAmelCase = _preprocess_mask(lowercase__ )
_lowerCAmelCase = mask_image.to(device=self.device , dtype=self.unet.dtype )
_lowerCAmelCase = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(lowercase__ , lowercase__ ) and len(lowercase__ ) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(lowercase__ )}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
_lowerCAmelCase = original_image.shape
_lowerCAmelCase = randn_tensor(lowercase__ , generator=lowercase__ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(lowercase__ , lowercase__ , lowercase__ , self.device )
_lowerCAmelCase = eta
_lowerCAmelCase = self.scheduler.timesteps[0] + 1
_lowerCAmelCase = generator[0] if isinstance(lowercase__ , lowercase__ ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
_lowerCAmelCase = self.unet(lowercase__ , lowercase__ ).sample
# compute previous image: x_t -> x_t-1
_lowerCAmelCase = self.scheduler.step(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
_lowerCAmelCase = self.scheduler.undo_step(lowercase__ , lowercase__ , lowercase__ )
_lowerCAmelCase = t
_lowerCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCAmelCase = self.numpy_to_pil(lowercase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase__ )
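# A minimal usage sketch (the checkpoint id and variable names are illustrative
# assumptions, not taken from this file): load a DDPM UNet with the RePaint
# scheduler, then regenerate the masked region of an image.
#
#     from diffusers import RePaintPipeline, RePaintScheduler
#     scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#     pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#     out = pipe(image=original_image, mask_image=mask, num_inference_steps=250)
#     out.images[0].save("inpainted.png")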
| 192 | 1 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class lowerCamelCase_ ( lowerCamelCase ):
a__ = '''efficientformer'''
def __init__( self , __lowerCAmelCase = [3, 2, 6, 4] , __lowerCAmelCase = [4_8, 9_6, 2_2_4, 4_4_8] , __lowerCAmelCase = [True, True, True, True] , __lowerCAmelCase = 4_4_8 , __lowerCAmelCase = 3_2 , __lowerCAmelCase = 4 , __lowerCAmelCase = 7 , __lowerCAmelCase = 5 , __lowerCAmelCase = 8 , __lowerCAmelCase = 4 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 1_6 , __lowerCAmelCase = 3 , __lowerCAmelCase = 3 , __lowerCAmelCase = 3 , __lowerCAmelCase = 2 , __lowerCAmelCase = 1 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 1 , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = 1E-5 , __lowerCAmelCase = "gelu" , __lowerCAmelCase = 0.02 , __lowerCAmelCase = 1E-12 , __lowerCAmelCase = 2_2_4 , __lowerCAmelCase = 1E-05 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
__magic_name__ :List[Any] = hidden_act
__magic_name__ :Union[str, Any] = hidden_dropout_prob
__magic_name__ :Optional[int] = hidden_sizes
__magic_name__ :int = num_hidden_layers
__magic_name__ :Dict = num_attention_heads
__magic_name__ :Optional[int] = initializer_range
__magic_name__ :Optional[Any] = layer_norm_eps
__magic_name__ :Union[str, Any] = patch_size
__magic_name__ :Dict = num_channels
__magic_name__ :Optional[Any] = depths
__magic_name__ :Optional[int] = mlp_expansion_ratio
__magic_name__ :Tuple = downsamples
__magic_name__ :List[Any] = dim
__magic_name__ :str = key_dim
__magic_name__ :Any = attention_ratio
__magic_name__ :Tuple = resolution
__magic_name__ :str = pool_size
__magic_name__ :Optional[Any] = downsample_patch_size
__magic_name__ :Any = downsample_stride
__magic_name__ :List[Any] = downsample_pad
__magic_name__ :List[str] = drop_path_rate
__magic_name__ :Union[str, Any] = num_metaad_blocks
__magic_name__ :Any = distillation
__magic_name__ :Tuple = use_layer_scale
__magic_name__ :Union[str, Any] = layer_scale_init_value
__magic_name__ :int = image_size
__magic_name__ :Tuple = batch_norm_eps
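# Usage sketch (values are hypothetical; upstream this class is
# transformers.EfficientFormerConfig): the config is a plain PretrainedConfig
# subclass, so individual defaults can be overridden by keyword.
#
#     config = EfficientFormerConfig(depths=[3, 2, 6, 4], hidden_sizes=[48, 96, 224, 448])
#     print(config.num_attention_heads)  # 8 by default, per the signature above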
| 180 |
SCREAMING_SNAKE_CASE__ : str = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """simple docstring"""
    operators = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 180 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--onnx_model_path""",
default=None,
type=str,
required=True,
help="""Path to ONNX model: """,
)
parser.add_argument(
"""--output_dir""",
default=None,
type=str,
required=True,
help="""The output directory where the model checkpoints and predictions will be written.""",
)
# Other parameters
parser.add_argument(
"""--tokenizer_name""",
default="""""",
type=str,
required=True,
help="""Pretrained tokenizer name or path if not the same as model_name""",
)
parser.add_argument(
"""--version_2_with_negative""",
action="""store_true""",
help="""If true, the SQuAD examples contain some that do not have an answer.""",
)
parser.add_argument(
"""--null_score_diff_threshold""",
type=float,
default=0.0,
help="""If null_score - best_non_null is greater than the threshold predict null.""",
)
parser.add_argument(
"""--max_seq_length""",
default=384,
type=int,
help=(
"""The maximum total input sequence length after WordPiece tokenization. Sequences """
"""longer than this will be truncated, and sequences shorter than this will be padded."""
),
)
parser.add_argument(
"""--doc_stride""",
default=128,
type=int,
help="""When splitting up a long document into chunks, how much stride to take between chunks.""",
)
parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""")
parser.add_argument(
"""--n_best_size""",
default=20,
type=int,
help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""",
)
parser.add_argument(
"""--max_answer_length""",
default=30,
type=int,
help=(
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
),
)
parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""")
parser.add_argument(
"""--dataset_name""",
type=str,
default=None,
required=True,
help="""The name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--dataset_config_name""",
type=str,
default=None,
help="""The configuration name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
    """--preprocessing_num_workers""", type=int, default=4, help="""The number of processes to use for preprocessing."""
)
parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
"""--fp16""",
action="""store_true""",
help="""Whether to use 16-bit (mixed) precision instead of 32-bit""",
)
parser.add_argument(
"""--int8""",
action="""store_true""",
help="""Whether to use INT8""",
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        """You are instantiating a new tokenizer from scratch. This is not supported by this script."""
        """You can do it from another script, save it, and load it from here, using --tokenizer_name."""
    )

logger.info("""Training/evaluation parameters %s""", args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = """temp_engine/bert-fp32.engine"""
if args.fp16:
    engine_name = """temp_engine/bert-fp16.engine"""
if args.int8:
    engine_name = """temp_engine/bert-int8.engine"""
# import ONNX file
if not os.path.exists("""temp_engine"""):
os.makedirs("""temp_engine""")
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, """rb""") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, """wb""") as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    """simple docstring"""
    input_ids = np.asarray(inputs['''input_ids'''], dtype=np.int32)
    attention_mask = np.asarray(inputs['''attention_mask'''], dtype=np.int32)
    token_type_ids = np.asarray(inputs['''token_type_ids'''], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
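# Added commentary (not original code): the host-to-device copies, the kernel
# launch, and the device-to-host copies above are all enqueued on a single
# CUDA stream, so they execute in issue order without blocking the CPU.
# stream.synchronize() is the only blocking call, which is why the time.time()
# window around the enqueue/synchronize pair measures the full per-batch
# inference latency, including the transfers.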
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("""Evaluation requires a dataset name""")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["""validation"""].column_names

question_column_name = """question""" if """question""" in column_names else column_names[0]
context_column_name = """context""" if """context""" in column_names else column_names[1]
answer_column_name = """answers""" if """answers""" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == """right"""
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    """simple docstring"""
    # Remove leading whitespace from questions so truncation of the context behaves.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation='''only_second''' if pad_on_right else '''only_first''',
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding='''max_length''',
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop('''overflow_to_sample_mapping''' )

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples['''example_id'''] = []

    for i in range(len(tokenized_examples['''input_ids'''] ) ):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples['''example_id'''].append(examples['''id'''][sample_index] )

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples['''offset_mapping'''][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples['''offset_mapping'''][i] )
        ]

    return tokenized_examples
eval_examples = raw_datasets["""validation"""]

# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="""Running tokenizer on validation dataset""",
)

data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(["""example_id""", """offset_mapping"""])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage='''eval'''):
    """simple docstring"""
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()]

    references = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("""squad_v2""" if args.version_2_with_negative else """squad""")
# Evaluation!
logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path)
with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        """simple docstring"""
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info("""***** Running Evaluation *****""")
logger.info(F""" Num examples = {len(eval_dataset)}""")
logger.info(F""" Batch size = {args.per_device_eval_batch_size}""")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(""" Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 1000 / niter))
logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 1000))
logger.info("""Total Number of Inference = %d""", niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"""Evaluation metrics: {eval_metric}""")
| 19 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
_a = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == '''group''',
            )
            is_used = True
        elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index)
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    """simple docstring"""
    name = full_name.split('''adaptor.''' )[-1]
    items = name.split('''.''' )

    if items[1].isdigit():
        layer_id = int(items[1] )
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
                adapter.proj_layer_norm.bias.data = value
                logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
                adapter.proj.bias.data = value
                logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
                adapter.proj.weight.data = value
                logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' )
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(F'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """simple docstring"""
    # load configs
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            '''config_yaml''': config_yaml_path,
            '''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
            '''w2v_path''': checkpoint_path,
            '''load_pretrained_decoder_from''': None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
    logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    tokenizer = MBartaaTokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wavavec.config.to_dict()
    config['''pad_token_id'''] = tokenizer.pad_token_id
    config['''bos_token_id'''] = tokenizer.bos_token_id
    config['''eos_token_id'''] = tokenizer.eos_token_id
    config['''tokenizer_class'''] = '''mbart50'''
    config['''feature_extractor_type'''] = '''wav2vec2'''
    config['''decoder_start_token_id'''] = tokenizer.eos_token_id
    config['''forced_bos_token_id'''] = 25_00_04
    config['''forced_eos_token_id'''] = tokenizer.eos_token_id

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=25_0004, type=int, help="""`decoder_start_token_id` of model config""")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
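# Example invocation (the script name and all paths are placeholders; the
# flags come from the argparse definitions above):
#
#   python convert_mbart_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path ./converted_model \
#       --dict_path /path/to/dict \
#       --config_yaml_path /path/to/config.yaml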
| 19 | 1 |
def kth_permutation(k: int, n: int) -> list[int]:
    '''simple docstring'''
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
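# Hand-checked example: with k=5 and n=4 the function returns the 5th
# (0-indexed) lexicographic permutation of range(4).
#
#   kth_permutation(5, 4)  # -> [0, 3, 2, 1]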
| 719 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("""datasets.utils.deprecation_utils._emitted_deprecation_warnings""", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("""datasets.inspect.huggingface_hub""", HfhMock())


@pytest.mark.parametrize(
    """func, args""", [(load_metric, ("""metrics/mse""",)), (list_metrics, ()), (inspect_metric, ("""metrics/mse""", """tmp_path"""))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != """tmp_path""" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="""https://huggingface.co/docs/evaluate"""):
        func(*args)
| 116 | 0 |
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class snake_case_ ( nn.Module ):
"""simple docstring"""
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowerCAmelCase_ , model.state_dict())
UpperCamelCase = os.path.join(lowerCAmelCase_ , '''index.json''')
self.assertTrue(os.path.isfile(lowerCAmelCase_))
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
UpperCamelCase = os.path.join(lowerCAmelCase_ , F'{key}.dat')
self.assertTrue(os.path.isfile(lowerCAmelCase_))
# TODO: add tests on the fact weights are properly loaded
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
UpperCamelCase = torch.randn(2 , 3 , dtype=lowerCAmelCase_)
with TemporaryDirectory() as tmp_dir:
UpperCamelCase = offload_weight(lowerCAmelCase_ , '''weight''' , lowerCAmelCase_ , {})
UpperCamelCase = os.path.join(lowerCAmelCase_ , '''weight.dat''')
self.assertTrue(os.path.isfile(lowerCAmelCase_))
self.assertDictEqual(lowerCAmelCase_ , {'''weight''': {'''shape''': [2, 3], '''dtype''': str(lowerCAmelCase_).split('''.''')[1]}})
UpperCamelCase = load_offloaded_weight(lowerCAmelCase_ , index['''weight'''])
self.assertTrue(torch.equal(lowerCAmelCase_ , lowerCAmelCase_))
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = ModelForTest()
UpperCamelCase = model.state_dict()
UpperCamelCase = {k: v for k, v in state_dict.items() if '''linear2''' not in k}
UpperCamelCase = {k: v for k, v in state_dict.items() if '''linear2''' in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowerCAmelCase_ , lowerCAmelCase_)
UpperCamelCase = OffloadedWeightsLoader(state_dict=lowerCAmelCase_ , save_folder=lowerCAmelCase_)
# Every key is there with the right value
self.assertEqual(sorted(lowerCAmelCase_) , sorted(state_dict.keys()))
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowerCAmelCase_ , weight_map[key]))
UpperCamelCase = {k: v for k, v in state_dict.items() if '''weight''' in k}
UpperCamelCase = {k: v for k, v in state_dict.items() if '''weight''' not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowerCAmelCase_ , lowerCAmelCase_)
UpperCamelCase = OffloadedWeightsLoader(state_dict=lowerCAmelCase_ , save_folder=lowerCAmelCase_)
# Every key is there with the right value
self.assertEqual(sorted(lowerCAmelCase_) , sorted(state_dict.keys()))
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowerCAmelCase_ , weight_map[key]))
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowerCAmelCase_ , lowerCAmelCase_)
# Duplicates are removed
UpperCamelCase = OffloadedWeightsLoader(state_dict=lowerCAmelCase_ , save_folder=lowerCAmelCase_)
# Every key is there with the right value
self.assertEqual(sorted(lowerCAmelCase_) , sorted(state_dict.keys()))
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowerCAmelCase_ , weight_map[key]))
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = {'''a.1''': 0, '''a.10''': 1, '''a.2''': 2}
UpperCamelCase = extract_submodules_state_dict(lowerCAmelCase_ , ['''a.1''', '''a.2'''])
self.assertDictEqual(lowerCAmelCase_ , {'''a.1''': 0, '''a.2''': 2})
UpperCamelCase = {'''a.1.a''': 0, '''a.10.a''': 1, '''a.2.a''': 2}
UpperCamelCase = extract_submodules_state_dict(lowerCAmelCase_ , ['''a.1''', '''a.2'''])
        self.assertDictEqual(lowerCAmelCase_ , {'''a.1.a''': 0, '''a.2.a''': 2})
| 34 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    """simple docstring"""
    task: str = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema: ClassVar[Features] = Features({'''text''': Value('''string''' )} )
    label_schema: ClassVar[Features] = Features({'''labels''': ClassLabel} )
    text_column: str = '''text'''
    label_column: str = '''labels'''
    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(F'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['''labels'''] = features[self.label_column]
        task_template.__dict__['''label_schema'''] = label_schema
        return task_template
@property
    def column_mapping( self ) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
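# Minimal usage sketch (the column and label names are made up): align the
# template's generic schema with a dataset whose label column is a ClassLabel.
#
#   from datasets import ClassLabel, Features, Value
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   template = TextClassification(text_column="text", label_column="labels")
#   template = template.align_with_features(features)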
| 401 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class a ( UpperCAmelCase ):
_lowercase = "deit"
def __init__( self , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.0 , A_=0.0 , A_=0.02 , A_=1e-12 , A_=224 , A_=16 , A_=3 , A_=True , A_=16 , **A_ , ):
'''simple docstring'''
super().__init__(**A_ )
_UpperCAmelCase : str = hidden_size
_UpperCAmelCase : List[str] = num_hidden_layers
_UpperCAmelCase : Optional[int] = num_attention_heads
_UpperCAmelCase : int = intermediate_size
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : int = hidden_dropout_prob
_UpperCAmelCase : Dict = attention_probs_dropout_prob
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : Union[str, Any] = layer_norm_eps
_UpperCAmelCase : List[str] = image_size
_UpperCAmelCase : Optional[Any] = patch_size
_UpperCAmelCase : str = num_channels
_UpperCAmelCase : int = qkv_bias
_UpperCAmelCase : str = encoder_stride
class a ( UpperCAmelCase ):
_lowercase = version.parse("1.11" )
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return 1e-4
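# Usage sketch (assuming the upstream transformers names DeiTConfig and
# DeiTOnnxConfig for the two classes above): the ONNX config declares a single
# 4-D pixel_values input and a validation tolerance of 1e-4.
#
#   config = DeiTConfig()
#   onnx_config = DeiTOnnxConfig(config)
#   print(list(onnx_config.inputs))         # ['pixel_values']
#   print(onnx_config.atol_for_validation)  # 1e-4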
| 467 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
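# fire.Fire turns the function into a command-line entry point, so a typical
# invocation looks like (the script file name is a placeholder):
#
#   python rouge_cli.py predictions.txt references.txt --save_path rouge.json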
| 467 | 1 |
"""simple docstring"""
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    if not sentence:
        return ""

    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
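# Hand-checked examples:
#
#   capitalize("hello world")  # -> 'Hello world'
#   capitalize("123 hey")      # -> '123 hey' (a non-letter first character is kept)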
| 453 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class __a ( __magic_name__ ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = 'beit'
def __init__( self , snake_case=8_192 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3_072 , snake_case="gelu" , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=1e-12 , snake_case=224 , snake_case=16 , snake_case=3 , snake_case=False , snake_case=False , snake_case=False , snake_case=False , snake_case=0.1 , snake_case=0.1 , snake_case=True , snake_case=[3, 5, 7, 11] , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ):
"""simple docstring"""
super().__init__(**snake_case )
lowerCAmelCase__ : Union[str, Any] = vocab_size
lowerCAmelCase__ : int = hidden_size
lowerCAmelCase__ : str = num_hidden_layers
lowerCAmelCase__ : Any = num_attention_heads
lowerCAmelCase__ : List[str] = intermediate_size
lowerCAmelCase__ : int = hidden_act
lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase__ : str = attention_probs_dropout_prob
lowerCAmelCase__ : Optional[int] = initializer_range
lowerCAmelCase__ : Dict = layer_norm_eps
lowerCAmelCase__ : int = image_size
lowerCAmelCase__ : Union[str, Any] = patch_size
lowerCAmelCase__ : Dict = num_channels
lowerCAmelCase__ : Optional[Any] = use_mask_token
lowerCAmelCase__ : Dict = use_absolute_position_embeddings
lowerCAmelCase__ : Any = use_relative_position_bias
lowerCAmelCase__ : List[Any] = use_shared_relative_position_bias
lowerCAmelCase__ : Dict = layer_scale_init_value
lowerCAmelCase__ : Optional[int] = drop_path_rate
lowerCAmelCase__ : Optional[Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
lowerCAmelCase__ : Optional[int] = out_indices
lowerCAmelCase__ : List[Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
lowerCAmelCase__ : List[Any] = use_auxiliary_head
lowerCAmelCase__ : Optional[int] = auxiliary_loss_weight
lowerCAmelCase__ : List[str] = auxiliary_channels
lowerCAmelCase__ : Optional[Any] = auxiliary_num_convs
lowerCAmelCase__ : Union[str, Any] = auxiliary_concat_input
lowerCAmelCase__ : List[str] = semantic_loss_ignore_index
class __a ( __magic_name__ ):
"""simple docstring"""
__UpperCamelCase : List[str] = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return 1e-4
| 453 | 1 |
'''simple docstring'''
import math
def fx(x: float, a: float) -> float:
    """simple docstring"""
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    """simple docstring"""
    return 2 * x


def get_initial_point(a: float) -> float:
    """simple docstring"""
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00_0000_0000_0001
) -> float:
    """simple docstring"""
    if a < 0:
        raise ValueError('math domain error')

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
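# Hand-checked example: Newton's iteration on f(x) = x**2 - a converges to
# the square root of a, e.g.
#
#   square_root_iterative(2.0)  # -> 1.41421356... (approximately sqrt(2))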
| 714 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ : Dict = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Any = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
snake_case__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 389 | 0 |
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    """Track the peak RSS of the current process on a background thread."""

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures
def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem (deltas converted from bytes to MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures
def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
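# Hedged usage sketch (the wrapper below is an assumed helper, not part of the
# original utilities): time and memory-profile an arbitrary callable.
def profile_workload(fn, description="workload"):
    start = start_measure()
    fn()
    log_measures(end_measure(start), description)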
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_A = logging.get_logger(__name__)
_A = {
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    """Configuration for a BART model; the defaults match facebook/bart-large."""

    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
        encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False,
        use_cache=True, num_labels=3, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True,
        decoder_start_token_id=2, forced_eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """ONNX export configuration for BART."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
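# Hedged usage sketch (requires downloading a BART tokenizer; the checkpoint
# name is illustrative): build the ONNX config and generate the dummy tensors
# an exporter would trace with.
def _demo_bart_onnx_inputs():
    from transformers import BartTokenizer  # deferred import for the sketch

    onnx_config = BartOnnxConfig(BartConfig(), task="default")
    tokenizer = BartTokenizer.from_pretrained("facebook/bart-large")
    return onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)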
| 182 | 0 |
'''UnCLIP scheduler: a modified DDPM-style scheduler used by the UnCLIP text-to-image pipelines.'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """Output of `step`: the previous-timestep sample and the predicted x_0."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule from a cumulative-alpha function alpha_bar(t)."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
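# Worked reading (illustrative): with the cosine transform,
# alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2, and each
# beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i) is the incremental noise
# added between consecutive schedule points, capped at max_beta = 0.999 so no
# single step destroys the signal entirely.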
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    """DDPM-style scheduler for UnCLIP with `fixed_small_log` or `learned_range` variance."""

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """No-op scaling; kept for API compatibility across schedulers."""
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Set the discrete timesteps for inference, spaced to hit both ends of the training schedule."""
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timesteps share the samples' device and dtype
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
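# Hedged usage sketch: a bare denoising loop with a stand-in "model" (random
# outputs) to illustrate the set_timesteps/step plumbing only; a real UnCLIP
# pipeline supplies UNet outputs and explicit prev_timestep values.
def _demo_scheduler_loop():
    scheduler = UnCLIPScheduler()
    scheduler.set_timesteps(10, device="cpu")
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample)  # stand-in for a real UNet
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample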
 | 721 |
'''Project Euler problem 75: count perimeters that form exactly one integer right triangle.'''
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    """Count perimeters up to `limit` that admit exactly one integer right triangle."""
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
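# Worked instance (illustrative): Euclid's parametrisation with m > n > 0,
# opposite parity, and gcd(m, n) = 1 yields the primitive triples
# (m^2 - n^2, 2mn, m^2 + n^2). For m = 2, n = 1 that is (3, 4, 5) with
# perimeter 2*m*(m + n) = 12, and every multiple of 12 up to the limit then
# gains one count in `frequencies`.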
if __name__ == "__main__":
print(F"""{solution() = }""")
| 174 | 0 |