"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : Optional[int] = 'deformable_detr'
A_ : Any = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=3 , __UpperCAmelCase=300 , __UpperCAmelCase=1024 , __UpperCAmelCase=6 , __UpperCAmelCase=1024 , __UpperCAmelCase=8 , __UpperCAmelCase=6 , __UpperCAmelCase=1024 , __UpperCAmelCase=8 , __UpperCAmelCase=0.0 , __UpperCAmelCase=True , __UpperCAmelCase="relu" , __UpperCAmelCase=256 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1.0 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="sine" , __UpperCAmelCase="resnet50" , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=False , __UpperCAmelCase=300 , __UpperCAmelCase=False , __UpperCAmelCase=1 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=1 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.25 , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> List[str]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
_a = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_a = backbone_config.get('''model_type''' )
_a = CONFIG_MAPPING[backbone_model_type]
_a = config_class.from_dict(__UpperCAmelCase )
_a = use_timm_backbone
_a = backbone_config
_a = num_channels
_a = num_queries
_a = max_position_embeddings
_a = d_model
_a = encoder_ffn_dim
_a = encoder_layers
_a = encoder_attention_heads
_a = decoder_ffn_dim
_a = decoder_layers
_a = decoder_attention_heads
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = activation_function
_a = init_std
_a = init_xavier_std
_a = encoder_layerdrop
_a = auxiliary_loss
_a = position_embedding_type
_a = backbone
_a = use_pretrained_backbone
_a = dilation
# deformable attributes
_a = num_feature_levels
_a = encoder_n_points
_a = decoder_n_points
_a = two_stage
_a = two_stage_num_proposals
_a = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
_a = class_cost
_a = bbox_cost
_a = giou_cost
# Loss coefficients
_a = mask_loss_coefficient
_a = dice_loss_coefficient
_a = bbox_loss_coefficient
_a = giou_loss_coefficient
_a = eos_coefficient
_a = focal_alpha
_a = disable_custom_kernels
super().__init__(is_encoder_decoder=__UpperCAmelCase , **__UpperCAmelCase )
@property
def _UpperCAmelCase ( self ) -> int:
return self.encoder_attention_heads
@property
def _UpperCAmelCase ( self ) -> int:
return self.d_model
def _UpperCAmelCase ( self ) -> str:
_a = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
_a = self.backbone_config.to_dict()
_a = self.__class__.model_type
return output
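
# Usage sketch (illustrative, not part of the original file): `attribute_map`
# redirects `hidden_size` to `d_model` and `num_attention_heads` to
# `encoder_attention_heads`, so the following holds:
#
#   from transformers import DeformableDetrConfig
#
#   config = DeformableDetrConfig(d_model=256, encoder_attention_heads=8)
#   assert config.hidden_size == 256
#   assert config.num_attention_heads == 8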
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
__snake_case = logging.get_logger(__name__)
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> None:
warnings.warn(
'''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ChineseCLIPImageProcessor instead.''' , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
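
# Usage note (illustrative, not part of the original file): `_LazyModule` defers
# the heavy submodule imports until an attribute is first accessed, e.g.
#
#   from transformers import MvpConfig, MvpTokenizer        # cheap, no torch import yet
#   from transformers import MvpForConditionalGeneration    # first touch imports modeling_mvp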
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
lowerCAmelCase__ = ''''''
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
class SCREAMING_SNAKE_CASE__ ( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self , snake_case__ = " " ):
"""simple docstring"""
lowerCAmelCase : List[Any] = sentence_delimiter
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
return list(snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Any = []
for sent_idx, sentence in enumerate(snake_case__ ):
chars.extend(self.process_string(snake_case__ ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(snake_case__ ) - 1:
chars.append(self.sentence_delimiter )
return chars
lowerCAmelCase__ = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
lowerCAmelCase__ = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
lowerCAmelCase__ = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
lowerCAmelCase__ = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
lowerCAmelCase__ = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
"https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
] , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__=False ):
"""simple docstring"""
if concatenate_texts:
return jiwer.compute_measures(
snake_case__ , snake_case__ , truth_transform=snake_case__ , hypothesis_transform=snake_case__ , )["wer"]
lowerCAmelCase : List[Any] = 0
lowerCAmelCase : Optional[Any] = 0
for prediction, reference in zip(snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = jiwer.compute_measures(
snake_case__ , snake_case__ , truth_transform=snake_case__ , hypothesis_transform=snake_case__ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
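
# Usage sketch (illustrative, not part of the original file; the score below is
# hand-computed from the CER formula above, not a verified run):
#
#   import datasets
#
#   cer = datasets.load_metric("cer")
#   score = cer.compute(predictions=["hello wrld"], references=["hello world"])
#   # one deleted character out of 11 reference characters -> ~0.0909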
"""simple docstring"""
SCREAMING_SNAKE_CASE__:List[Any] = """Input must be a string of 8 numbers plus letter"""
SCREAMING_SNAKE_CASE__:Any = """TRWAGMYFPDXBNJZSQVHLCKE"""
def _lowerCamelCase( a ):
if not isinstance(a , a ):
__a = F"Expected string as input, found {type(a ).__name__}"
raise TypeError(a )
__a = spanish_id.replace("-" , "" ).upper()
if len(a ) != 9:
raise ValueError(a )
try:
__a = int(spanish_id_clean[0:8] )
__a = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(a ) from ex
if letter.isdigit():
raise ValueError(a )
return letter == LOOKUP_LETTERS[number % 2_3]
if __name__ == "__main__":
import doctest
doctest.testmod()
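
# Worked example (illustrative): for "12345678Z" the number part is 12345678,
# 12345678 % 23 == 14, and LOOKUP_LETTERS[14] == "Z", so the ID is valid.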
"""simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase( a , a , a , a="attention" ):
__a = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"]
return k, o, q, v
def _lowerCamelCase( a , a , a , a=False ):
if split_mlp_wi:
__a = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"]
__a = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"]
__a = (wi_a, wi_a)
else:
__a = params[F"{prefix}/layers_{i}/mlp/wi/kernel"]
__a = params[F"{prefix}/layers_{i}/mlp/wo/kernel"]
return wi, wo
def _lowerCamelCase( a , a , a , a ):
return params[F"{prefix}/layers_{i}/{layer_name}/scale"]
def _lowerCamelCase( a , *, a , a ):
__a = traverse_util.flatten_dict(variables["target"] )
__a = {"/".join(a ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__a = "encoder/layers_0/mlp/wi_0/kernel" in old
print("Split MLP:" , a )
__a = collections.OrderedDict()
# Shared embeddings.
__a = old["token_embedder/embedding"]
# Encoder.
for i in range(a ):
# Block i, layer 0 (Self Attention).
__a = tax_layer_norm_lookup(a , a , "encoder" , "pre_attention_layer_norm" )
__a , __a , __a , __a = tax_attention_lookup(a , a , "encoder" , "attention" )
__a = layer_norm
__a = k.T
__a = o.T
__a = q.T
__a = v.T
# Block i, layer 1 (MLP).
__a = tax_layer_norm_lookup(a , a , "encoder" , "pre_mlp_layer_norm" )
__a , __a = tax_mlp_lookup(a , a , "encoder" , a )
__a = layer_norm
if split_mlp_wi:
__a = wi[0].T
__a = wi[1].T
else:
__a = wi.T
__a = wo.T
__a = old[
"encoder/relpos_bias/rel_embedding"
].T
__a = old["encoder/encoder_norm/scale"]
if not is_encoder_only:
# Decoder.
for i in range(a ):
# Block i, layer 0 (Self Attention).
__a = tax_layer_norm_lookup(a , a , "decoder" , "pre_self_attention_layer_norm" )
__a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "self_attention" )
__a = layer_norm
__a = k.T
__a = o.T
__a = q.T
__a = v.T
# Block i, layer 1 (Cross Attention).
__a = tax_layer_norm_lookup(a , a , "decoder" , "pre_cross_attention_layer_norm" )
__a , __a , __a , __a = tax_attention_lookup(a , a , "decoder" , "encoder_decoder_attention" )
__a = layer_norm
__a = k.T
__a = o.T
__a = q.T
__a = v.T
# Block i, layer 2 (MLP).
__a = tax_layer_norm_lookup(a , a , "decoder" , "pre_mlp_layer_norm" )
__a , __a = tax_mlp_lookup(a , a , "decoder" , a )
__a = layer_norm
if split_mlp_wi:
__a = wi[0].T
__a = wi[1].T
else:
__a = wi.T
__a = wo.T
__a = old["decoder/decoder_norm/scale"]
__a = old[
"decoder/relpos_bias/rel_embedding"
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__a = old["decoder/logits_dense/kernel"].T
return new
def _lowerCamelCase( a , a ):
__a = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__a = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__a = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
__a = state_dict["shared.weight"]
return state_dict
def _lowerCamelCase( a , a , a , a ):
__a = checkpoints.load_tax_checkpoint(a )
__a = convert_tax_to_pytorch(a , num_layers=config.num_layers , is_encoder_only=a )
__a = make_state_dict(a , a )
model.load_state_dict(a , strict=a )
def _lowerCamelCase( a , a , a , a = False ):
__a = TaConfig.from_json_file(a )
print(F"Building PyTorch model from configuration: {config}" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__a = TaEncoderModel(a )
else:
__a = TaForConditionalGeneration(a )
# Load weights from tf checkpoint
load_tax_weights_in_ta(a , a , a , a )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(a )
# Verify that we can load the checkpoint.
model.from_pretrained(a )
print("Done" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
SCREAMING_SNAKE_CASE__:Tuple = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
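
# Usage sketch (illustrative; the paths are placeholders, not from the original file):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output_dir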
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu


class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_safe_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        """Test that stable diffusion works with fp16"""
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)


@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        # without safety guidance, the safety checker blacks out the image
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize


def norm_squared(vector: ndarray) -> float:
    """Return the squared second norm of vector: sum(x * x for x in vector)."""
    return np.dot(vector, vector)


class SVC:
    """Support Vector Classifier with linear and RBF kernels."""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        """Linear kernel (as if no kernel is used at all)."""
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        """Radial Basis Function kernel: exp(-gamma * ||v1 - v2||^2)."""
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list, classes: ndarray) -> None:
        """Fits the SVC on a set of observations with classes in {-1, 1}."""
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            """Opposite of the dual function to maximize."""
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        """Get the expected class (1 or -1) of an observation."""
        s = sum(
            self.optimum[n] * self.classes[n] * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
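
# Usage sketch (illustrative, not part of the original file): fit on a tiny,
# linearly separable problem and classify two of the training points.
#
#   xs = [np.asarray([0, 1]), np.asarray([0, 2]), np.asarray([1, 1]), np.asarray([1, 2])]
#   ys = np.asarray([1, 1, -1, -1])
#   svc = SVC(kernel="linear")
#   svc.fit(xs, ys)
#   svc.predict(np.asarray([0, 1]))   # -> 1
#   svc.predict(np.asarray([1, 1]))   # -> -1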
"""Prepare Google Natural Questions for BigBird question answering."""
import os

import jsonlines
import numpy as np
from tqdm import tqdm


DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}


def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer


def get_context_and_ans(example, assertion=False):
    """Gives new context after removing <html> tags & the new answer tokens as per the new context."""
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }


def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    # overlap will be of doc_stride - q_len
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(inputs),
                "end_token": [-100] * len(inputs),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        # this won't match exactly because of extra gaps => visually inspect everything
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }


def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example


def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # removing ~60% of the no-answer samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )


if __name__ == "__main__":
    from datasets import load_dataset

    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
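
# Usage sketch (illustrative; the script filename is hypothetical, not from the
# original file): the split is selected through the PROCESS_TRAIN env variable.
#
#   PROCESS_TRAIN=true python prepare_natural_questions.py   # writes nq-training.jsonl
#   python prepare_natural_questions.py                      # writes nq-validation.jsonl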
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
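
# Usage sketch (illustrative; `HelloCommand` and its wiring are hypothetical,
# not from the original file): a minimal concrete subclass.
#
#   class HelloCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser):
#           hello_parser = parser.add_parser("hello")
#           hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#       def run(self):
#           print("hello")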
"""Fine-tune GPT-2 with Information Gain Filtration (IGF) on a language modeling task."""
import argparse
import random

import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler

from transformers import GPT2LMHeadModel


def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    """Collect pairs of context and information gain (X, IG(X)) for training the secondary learner."""
    # generates the same data every time
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)

    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()


def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    """Train the secondary learner."""
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner


def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    """Fine-tune a transformer model with IGF on a language modeling task."""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1

                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)

                    print("Test perplexity, step", global_step, ":", real_perp)

            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model


def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name"
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2)"
    )
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from "
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only "
            "informative data as input to the model"
        ),
    )
    parser.add_argument(
        "--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name"
    )
    parser.add_argument(
        "--recopy_model",
        default=recopy_gpt2,
        type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )


if __name__ == "__main__":
    main()
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Function that is called by `.generate()` to push new tokens"""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation"""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    """Streamer that prints tokens to stdout as soon as they form entire words."""

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receives tokens, decodes them, and prints them to stdout as soon as they form entire words."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decode the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, print until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining cache and signals the end of the stream."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as are Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and are handled
        # like all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    """Streamer that stores print-ready text in a queue, to be consumed by a downstream iterator."""

    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
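
# A minimal usage sketch for the iterator streamer above: `generate()` runs in
# a background thread while the main thread consumes decoded text chunks as
# they arrive. The "gpt2" checkpoint and the prompt are illustrative
# assumptions, not part of this module.
def _demo_text_iterator_streamer():
    from threading import Thread

    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    inputs = tokenizer(["An increasing sequence: one,"], return_tensors="pt")

    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
    thread.start()

    generated = ""
    for new_text in streamer:  # blocks until the next chunk (or the stop signal)
        generated += new_text
    thread.join()
    return generated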
| 43 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
lowerCamelCase__ : List[str] = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
lowerCamelCase__ : Optional[Any] = {
'RUCAIBox/mvp': 1_024,
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["input_ids", "attention_mask"]
lowercase_ = MvpTokenizer
def __init__( self : Optional[int] , _lowerCAmelCase : Any=None , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Optional[int]="replace" , _lowerCAmelCase : int="<s>" , _lowerCAmelCase : Optional[int]="</s>" , _lowerCAmelCase : Tuple="</s>" , _lowerCAmelCase : List[Any]="<s>" , _lowerCAmelCase : Any="<unk>" , _lowerCAmelCase : Dict="<pad>" , _lowerCAmelCase : Union[str, Any]="<mask>" , _lowerCAmelCase : Any=False , _lowerCAmelCase : str=True , **_lowerCAmelCase : int , ):
super().__init__(
_lowerCAmelCase , _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , errors=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , trim_offsets=_lowerCAmelCase , **_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , _lowerCAmelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE_ = getattr(_lowerCAmelCase , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE_ = add_prefix_space
SCREAMING_SNAKE_CASE_ = pre_tok_class(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE_ = 'post_processor'
SCREAMING_SNAKE_CASE_ = getattr(self.backend_tokenizer , _lowerCAmelCase , _lowerCAmelCase )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE_ = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE_ = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE_ = tuple(state['cls'] )
SCREAMING_SNAKE_CASE_ = False
if state.get('add_prefix_space' , _lowerCAmelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE_ = add_prefix_space
SCREAMING_SNAKE_CASE_ = True
if state.get('trim_offsets' , _lowerCAmelCase ) != trim_offsets:
SCREAMING_SNAKE_CASE_ = trim_offsets
SCREAMING_SNAKE_CASE_ = True
if changes_to_apply:
SCREAMING_SNAKE_CASE_ = getattr(_lowerCAmelCase , state.pop('type' ) )
SCREAMING_SNAKE_CASE_ = component_class(**_lowerCAmelCase )
setattr(self.backend_tokenizer , _lowerCAmelCase , _lowerCAmelCase )
@property
def lowerCAmelCase_ ( self : Optional[int] ):
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : List[str] ):
SCREAMING_SNAKE_CASE_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else value
SCREAMING_SNAKE_CASE_ = value
def lowerCAmelCase_ ( self : int , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : List[str] ):
SCREAMING_SNAKE_CASE_ = kwargs.get('is_split_into_words' , _lowerCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase_ ( self : Any , *_lowerCAmelCase : Any , **_lowerCAmelCase : List[Any] ):
SCREAMING_SNAKE_CASE_ = kwargs.get('is_split_into_words' , _lowerCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'to use it with pretokenized inputs.' )
return super()._encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
SCREAMING_SNAKE_CASE_ = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict=None ):
SCREAMING_SNAKE_CASE_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
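
# A minimal usage sketch, assuming network access to the RUCAIBox/mvp files
# listed in the maps above; `MvpTokenizerFast` is the upstream name of the
# class defined here. Single sequences get BART-style wrapping: <s> ... </s>.
def _demo_mvp_tokenizer():
    from transformers import MvpTokenizerFast

    tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    ids = tok("Summarize: the weather is nice.")["input_ids"]
    assert ids[0] == tok.bos_token_id and ids[-1] == tok.eos_token_id
    return tok.convert_ids_to_tokens(ids)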
| 225 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( ) -> str:
SCREAMING_SNAKE_CASE_ = 10
SCREAMING_SNAKE_CASE_ = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
SCREAMING_SNAKE_CASE_ = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(__UpperCAmelCase ) ),
} , features=__UpperCAmelCase , )
return dataset
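
# A hedged sketch of what the fixture above yields: a 10-row datasets.Dataset
# whose rows decode to plain Python objects. The helper name and the direct
# indexing are illustrative, not part of the original conftest.
def _demo_dataset_row(dataset):
    row = dataset[0]
    assert row["tokens"] == ["foo"] * 5
    assert row["labels"] == [1] * 5
    assert row["answers"] == {"answer_start": [97], "text": ["1976"]}
    assert row["id"] == 0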
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple , __UpperCAmelCase : str ) -> int:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=__UpperCAmelCase )
return filename
# FILE_CONTENT + files
lowerCamelCase__ : List[Any] = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Any ) -> List[str]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt'
SCREAMING_SNAKE_CASE_ = FILE_CONTENT
with open(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase )
return filename
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict ) -> List[str]:
    import bz2
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'utf-8' )
    with bz2.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] ) -> Any:
import gzip
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'utf-8' )
with gzip.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] ) -> int:
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'utf-8' )
        with lz4.frame.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] ) -> Any:
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
        with py7zr.SevenZipFile(__UpperCAmelCase , 'w' ) as archive:
archive.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : str ) -> str:
import tarfile
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(__UpperCAmelCase , 'w' ) as f:
f.add(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> List[Any]:
import lzma
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'utf-8' )
with lzma.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict ) -> str:
import zipfile
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict ) -> Any:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'utf-8' )
with zstd.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : int ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.xml'
SCREAMING_SNAKE_CASE_ = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase )
return filename
lowerCamelCase__ : Optional[Any] = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
lowerCamelCase__ : Dict = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
lowerCamelCase__ : Optional[int] = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
lowerCamelCase__ : List[Any] = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
lowerCamelCase__ : List[Any] = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( ) -> Tuple:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = datasets.Dataset.from_dict(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(__UpperCAmelCase ) ) as con:
SCREAMING_SNAKE_CASE_ = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
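
# A hedged read-back sketch for the SQLite fixture above; the helper is
# illustrative and queries the `dataset` table created by the fixture.
def _read_sqlite_rows(path):
    with contextlib.closing(sqlite3.connect(path)) as con:
        return con.execute("SELECT col_1, col_2, col_3 FROM dataset").fetchall()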
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(__UpperCAmelCase , 'w' , newline='' ) as f:
SCREAMING_SNAKE_CASE_ = csv.DictWriter(__UpperCAmelCase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(__UpperCAmelCase , 'w' , newline='' ) as f:
SCREAMING_SNAKE_CASE_ = csv.DictWriter(__UpperCAmelCase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : Tuple ) -> str:
    import bz2
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(__UpperCAmelCase , 'rb' ) as f:
SCREAMING_SNAKE_CASE_ = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : List[str] , __UpperCAmelCase : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : str ) -> Any:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(__UpperCAmelCase , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
SCREAMING_SNAKE_CASE_ = pa.schema(
{
'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
} )
with open(__UpperCAmelCase , 'wb' ) as f:
SCREAMING_SNAKE_CASE_ = pq.ParquetWriter(__UpperCAmelCase , schema=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__UpperCAmelCase ) )] for k in DATA[0]} , schema=__UpperCAmelCase )
writer.write_table(__UpperCAmelCase )
writer.close()
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple ) -> Any:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
SCREAMING_SNAKE_CASE_ = {'data': DATA}
with open(__UpperCAmelCase , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
SCREAMING_SNAKE_CASE_ = {'data': DATA_DICT_OF_LISTS}
with open(__UpperCAmelCase , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(__UpperCAmelCase , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(__UpperCAmelCase , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(__UpperCAmelCase , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(__UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(__UpperCAmelCase , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(__UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str] ) -> Union[str, Any]:
import gzip
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(__UpperCAmelCase , 'rb' ) as orig_file:
with gzip.open(__UpperCAmelCase , 'wb' ) as zipped_file:
zipped_file.writelines(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] ) -> List[str]:
import gzip
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(__UpperCAmelCase , 'rb' ) as orig_file:
with gzip.open(__UpperCAmelCase , 'wb' ) as zipped_file:
zipped_file.writelines(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.join('nested' , os.path.basename(__UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str ) -> Any:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(__UpperCAmelCase , 'w' ) as f:
f.add(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
f.add(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(__UpperCAmelCase , 'w' ) as f:
f.add(__UpperCAmelCase , arcname=os.path.join('nested' , os.path.basename(__UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = ['0', '1', '2', '3']
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(__UpperCAmelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict ) -> Any:
SCREAMING_SNAKE_CASE_ = ['0', '1', '2', '3']
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(__UpperCAmelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = ['0', '1', '2', '3']
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(__UpperCAmelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename('unsupported.ext' ) )
f.write(__UpperCAmelCase , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE_ = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( ) -> List[Any]:
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( ) -> Tuple:
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict ) -> int:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> Dict:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
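
# A hedged sketch of how the fixtures above are consumed in a test. Upstream
# the CSV fixture is named `csv_path` (the obfuscation here collapsed fixture
# names), and the expected counts follow from the DATA constant above.
def test_csv_fixture_roundtrip(csv_path):
    with open(csv_path, newline='') as f:
        rows = list(csv.DictReader(f))
    assert len(rows) == 4
    assert rows[0]['col_1'] == '0'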
| 225 | 1 |
'''simple docstring'''
def A__ ( UpperCAmelCase_ ):
    """Compute the rank of a matrix by Gaussian elimination (modifies the input in place)."""
    matrix = UpperCAmelCase_
    rows = len(matrix )
    columns = len(matrix[0] )
    rank = min(rows , columns )
    row = 0
    while row < rank:
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1 , rows ):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row , columns ):
                    matrix[col][i] -= multiplier * matrix[row][i]
            row += 1
        else:
            # Find a non-zero element below the diagonal to swap rows
            reduce = True
            for i in range(row + 1 , rows ):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                # No pivot in this column: shift the last usable column in and lower the rank
                rank -= 1
                for i in range(rows ):
                    matrix[i][row] = matrix[i][rank]
            # Stay on the same row and retry with the swapped row / shifted column
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
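
    # A small worked example for the routine above (exact arithmetic): rows 2
    # and 3 are multiples of row 1, so the rank is 1. The values are
    # illustrative.
    example = [
        [1.0, 2.0, 3.0],
        [2.0, 4.0, 6.0],
        [3.0, 6.0, 9.0],
    ]
    assert A__(example ) == 1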
| 364 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : int = logging.get_logger(__name__)
def A__ ( UpperCAmelCase_ ):
_UpperCamelCase : str = DPTConfig(embedding_type='hybrid' )
if "large" in checkpoint_url:
_UpperCamelCase : Any = 1_0_2_4
_UpperCamelCase : List[Any] = 4_0_9_6
_UpperCamelCase : List[str] = 2_4
_UpperCamelCase : Tuple = 1_6
_UpperCamelCase : Union[str, Any] = [5, 1_1, 1_7, 2_3]
_UpperCamelCase : Any = [2_5_6, 5_1_2, 1_0_2_4, 1_0_2_4]
_UpperCamelCase : Tuple = (1, 3_8_4, 3_8_4)
if "nyu" or "midas" in checkpoint_url:
_UpperCamelCase : Optional[int] = 7_6_8
_UpperCamelCase : Optional[Any] = [1, 1, 1, 0.5]
_UpperCamelCase : List[Any] = [2_5_6, 5_1_2, 7_6_8, 7_6_8]
_UpperCamelCase : Optional[int] = 1_5_0
_UpperCamelCase : Tuple = 1_6
_UpperCamelCase : Dict = (1, 3_8_4, 3_8_4)
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : Optional[int] = 'project'
if "ade" in checkpoint_url:
_UpperCamelCase : Dict = True
_UpperCamelCase : Dict = 7_6_8
_UpperCamelCase : Union[str, Any] = [1, 1, 1, 0.5]
_UpperCamelCase : Union[str, Any] = 1_5_0
_UpperCamelCase : str = 1_6
_UpperCamelCase : Tuple = 'huggingface/label-files'
_UpperCamelCase : Tuple = 'ade20k-id2label.json'
_UpperCamelCase : Tuple = json.load(open(cached_download(hf_hub_url(UpperCAmelCase_ , UpperCAmelCase_ , repo_type='dataset' ) ) , 'r' ) )
_UpperCamelCase : str = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()}
_UpperCamelCase : List[str] = idalabel
_UpperCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
_UpperCamelCase : int = [1, 1_5_0, 4_8_0, 4_8_0]
return config, expected_shape
def A__ ( UpperCAmelCase_ ):
_UpperCamelCase : str = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(UpperCAmelCase_ , UpperCAmelCase_ )
def A__ ( UpperCAmelCase_ ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_UpperCamelCase : List[str] = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
_UpperCamelCase : int = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
_UpperCamelCase : Any = name.replace('patch_embed' , '' )
if "pos_embed" in name:
_UpperCamelCase : Tuple = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
_UpperCamelCase : List[str] = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
_UpperCamelCase : int = name.replace('proj' , 'projection' )
if "blocks" in name:
_UpperCamelCase : List[str] = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
_UpperCamelCase : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_UpperCamelCase : str = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name and "backbone" not in name:
_UpperCamelCase : Tuple = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name and "backbone" not in name:
_UpperCamelCase : str = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
_UpperCamelCase : Dict = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
_UpperCamelCase : List[str] = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
_UpperCamelCase : List[str] = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
_UpperCamelCase : Any = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
_UpperCamelCase : int = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
_UpperCamelCase : Dict = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
_UpperCamelCase : str = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_UpperCamelCase : str = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4 )}' )
if "out_conv" in name:
_UpperCamelCase : Dict = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
_UpperCamelCase : Union[str, Any] = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
_UpperCamelCase : Union[str, Any] = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
_UpperCamelCase : int = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
_UpperCamelCase : Dict = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_UpperCamelCase : str = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
_UpperCamelCase : Union[str, Any] = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
_UpperCamelCase : Optional[int] = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
_UpperCamelCase : Optional[int] = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_UpperCamelCase : Union[str, Any] = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
_UpperCamelCase : List[str] = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
_UpperCamelCase : List[Any] = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
_UpperCamelCase : List[Any] = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
_UpperCamelCase : Dict = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
_UpperCamelCase : Union[str, Any] = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
_UpperCamelCase : List[str] = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
_UpperCamelCase : int = name.replace('pretrained' , 'dpt' )
if "bn" in name:
_UpperCamelCase : Union[str, Any] = name.replace('bn' , 'batch_norm' )
if "head" in name:
_UpperCamelCase : Dict = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
_UpperCamelCase : str = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
_UpperCamelCase : Any = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
_UpperCamelCase : List[Any] = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
_UpperCamelCase : Dict = name.replace('..' , '.' )
if "stem.conv" in name:
_UpperCamelCase : Tuple = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
_UpperCamelCase : Optional[int] = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
_UpperCamelCase : List[str] = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
_UpperCamelCase : Union[str, Any] = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
_UpperCamelCase : Dict = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
_UpperCamelCase : str = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
_UpperCamelCase : Tuple = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCamelCase : List[str] = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' )
_UpperCamelCase : List[str] = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_UpperCamelCase : List[str] = in_proj_weight[: config.hidden_size, :]
_UpperCamelCase : int = in_proj_bias[: config.hidden_size]
_UpperCamelCase : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCamelCase : Union[str, Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCamelCase : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
_UpperCamelCase : Union[str, Any] = in_proj_bias[-config.hidden_size :]
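
# The split above in miniature: a fused qkv projection of width 3*H is sliced
# into query/key/value blocks of width H each. H and the random tensor are
# illustrative stand-ins for config.hidden_size and the real weights.
def _demo_qkv_split(hidden_size=4 ):
    q_k_v = torch.randn(3 * hidden_size , hidden_size )
    query = q_k_v[:hidden_size, :]
    key = q_k_v[hidden_size : hidden_size * 2, :]
    value = q_k_v[-hidden_size:, :]
    assert query.shape == key.shape == value.shape == (hidden_size, hidden_size)
    return query, key, value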
def A__ ( ):
_UpperCamelCase : Optional[int] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_UpperCamelCase : List[Any] = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw )
return im
@torch.no_grad()
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase , _UpperCamelCase : int = get_dpt_config(UpperCAmelCase_ )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
_UpperCamelCase : List[str] = torch.load(UpperCAmelCase_ , map_location='cpu' )
# remove certain keys
remove_ignore_keys_(UpperCAmelCase_ )
# rename keys
for key in state_dict.copy().keys():
_UpperCamelCase : Any = state_dict.pop(UpperCAmelCase_ )
_UpperCamelCase : int = val
# read in qkv matrices
read_in_q_k_v(UpperCAmelCase_ , UpperCAmelCase_ )
# load HuggingFace model
_UpperCamelCase : Union[str, Any] = DPTForSemanticSegmentation(UpperCAmelCase_ ) if 'ade' in checkpoint_url else DPTForDepthEstimation(UpperCAmelCase_ )
model.load_state_dict(UpperCAmelCase_ )
model.eval()
# Check outputs on an image
_UpperCamelCase : Tuple = 4_8_0 if 'ade' in checkpoint_url else 3_8_4
_UpperCamelCase : Any = DPTImageProcessor(size=UpperCAmelCase_ )
_UpperCamelCase : Union[str, Any] = prepare_img()
_UpperCamelCase : Optional[int] = image_processor(UpperCAmelCase_ , return_tensors='pt' )
# forward pass
_UpperCamelCase : Optional[Any] = model(**UpperCAmelCase_ ).logits if 'ade' in checkpoint_url else model(**UpperCAmelCase_ ).predicted_depth
if show_prediction:
_UpperCamelCase : List[str] = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=UpperCAmelCase_ , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 2_5_5 ).show()
if pytorch_dump_folder_path is not None:
Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCAmelCase_ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(UpperCAmelCase_ )
if push_to_hub:
model.push_to_hub('ybelkada/dpt-hybrid-midas' )
image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
snake_case_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
snake_case_ : Tuple = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
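
    # Hedged usage sketch (the script filename and output path are
    # illustrative; the URL is the default defined above):
    #
    #   python convert_dpt_checkpoint.py \
    #       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
    #       --pytorch_dump_folder_path ./dpt-large \
    #       --show_prediction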
| 236 | 0 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
a = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
a = {
"""vinai/phobert-base""": 256,
"""vinai/phobert-large""": 256,
}
def _snake_case ( word : Any ) -> set:
    """Return the set of adjacent symbol pairs in a word (used by the BPE merge loop below)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
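
# A tiny check of the pair extraction above, wrapped in a function so it does
# not run at import time; the sample word is illustrative.
def _demo_get_pairs():
    assert _snake_case(("l", "o", "w") ) == {("l", "o"), ("o", "w")}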
class lowercase_ ( __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase : int = VOCAB_FILES_NAMES
UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[str]="<s>" , _UpperCAmelCase : str="</s>" , _UpperCAmelCase : Dict="</s>" , _UpperCAmelCase : List[str]="<s>" , _UpperCAmelCase : Optional[int]="<unk>" , _UpperCAmelCase : Any="<pad>" , _UpperCAmelCase : int="<mask>" , **_UpperCAmelCase : Tuple , ):
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , **_UpperCAmelCase , )
_A = vocab_file
_A = merges_file
_A = {}
_A = 0
_A = 1
_A = 2
_A = 3
self.add_from_file(_UpperCAmelCase )
_A = {v: k for k, v in self.encoder.items()}
with open(_UpperCAmelCase , encoding='utf-8' ) as merges_handle:
_A = merges_handle.read().split('\n' )[:-1]
_A = [tuple(merge.split()[:-1] ) for merge in merges]
_A = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
_A = {}
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_A = [self.cls_token_id]
_A = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1]
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCAmelCase_ ( self : Dict ):
return len(self.encoder )
def lowerCAmelCase_ ( self : int ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Optional[Any] ):
if token in self.cache:
return self.cache[token]
_A = tuple(_UpperCAmelCase )
_A = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
_A = get_pairs(_UpperCAmelCase )
if not pairs:
return token
while True:
_A = min(_UpperCAmelCase , key=lambda _UpperCAmelCase : self.bpe_ranks.get(_UpperCAmelCase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_A = bigram
_A = []
_A = 0
while i < len(_UpperCAmelCase ):
try:
_A = word.index(_UpperCAmelCase , _UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_A = j
if word[i] == first and i < len(_UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A = tuple(_UpperCAmelCase )
_A = new_word
if len(_UpperCAmelCase ) == 1:
break
else:
_A = get_pairs(_UpperCAmelCase )
_A = '@@ '.join(_UpperCAmelCase )
_A = word[:-4]
_A = word
return word
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : List[str] ):
_A = []
_A = re.findall(r'\S+\n?' , _UpperCAmelCase )
for token in words:
split_tokens.extend(list(self.bpe(_UpperCAmelCase ).split(' ' ) ) )
return split_tokens
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Any ):
return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Dict ):
return self.decoder.get(_UpperCAmelCase , self.unk_token )
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Union[str, Any] ):
_A = ' '.join(_UpperCAmelCase ).replace('@@ ' , '' ).strip()
return out_string
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ):
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_A = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ):
copyfile(self.vocab_file , _UpperCAmelCase )
if os.path.abspath(self.merges_file ) != os.path.abspath(_UpperCAmelCase ):
copyfile(self.merges_file , _UpperCAmelCase )
return out_vocab_file, out_merge_file
    def add_from_file( self , f ):
        # Accept either a path or an open file handle; a path is reopened and
        # the handle is passed back into this method.
        if isinstance(f , str ):
            try:
                with open(f , 'r' , encoding='utf-8' ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(F'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(' ' )
            if idx == -1:
                raise ValueError('Incorrect dictionary format, expected \'<token> <cnt>\'' )
            word = line[:idx]
            self.encoder[word] = len(self.encoder )
| 315 |
def matching_min_vertex_cover( graph: dict ) -> set:
    """Approximate a minimum vertex cover by taking both endpoints of arbitrary edges."""
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph )

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node), add its extremities to chosen_vertices, and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node , to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices


def get_edges( graph: dict ) -> set:
    """Return the set of edge tuples (from_node, to_node) of the graph."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
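
# A quick demonstration, using the same graph as the commented example below:
# the returned set is a valid cover (every edge has at least one endpoint in
# it), and it is at most twice the size of a minimum vertex cover.
def _demo_matching_cover():
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    cover = matching_min_vertex_cover(graph )
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            assert from_node in cover or to_node in cover
    return cover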
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 50 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a__ : List[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCamelCase_ ( __lowerCamelCase):
"""simple docstring"""
snake_case__ : Dict = ['pixel_values']
def __init__( self : Dict , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : bool = True , **UpperCAmelCase__ : Any , ) -> Dict:
super().__init__(**UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = size if size is not None else {"shortest_edge": 2_2_4}
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ , param_name="crop_size" )
__SCREAMING_SNAKE_CASE = do_resize
__SCREAMING_SNAKE_CASE = size
__SCREAMING_SNAKE_CASE = resample
__SCREAMING_SNAKE_CASE = do_center_crop
__SCREAMING_SNAKE_CASE = crop_size
__SCREAMING_SNAKE_CASE = do_rescale
__SCREAMING_SNAKE_CASE = rescale_factor
__SCREAMING_SNAKE_CASE = do_normalize
__SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__SCREAMING_SNAKE_CASE = image_std if image_std is not None else OPENAI_CLIP_STD
__SCREAMING_SNAKE_CASE = do_convert_rgb
def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Union[str, Any] , ) -> Dict:
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__SCREAMING_SNAKE_CASE = get_resize_output_image_size(UpperCamelCase_ , size=size["shortest_edge"] , default_to_square=UpperCamelCase_ )
return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Tuple , ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
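# A hedged, self-contained sketch of the preprocessing chain above, to make the
# order of operations concrete (resize -> center crop -> rescale -> normalize).
# The helper below is illustrative only; it is not part of the class.
import numpy as np
def _preprocess_one_demo(array: np.ndarray, scale: float = 1 / 255, mean: float = 0.5, std: float = 0.5) -> np.ndarray:
    # `array` is an HWC image; rescale to [0, 1], normalize, then HWC -> CHW
    array = array.astype(np.float32) * scale
    array = (array - mean) / std
    return array.transpose(2, 0, 1)
assert _preprocess_one_demo(np.zeros((224, 224, 3), dtype=np.uint8)).shape == (3, 224, 224)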
| 357 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    """simple docstring"""
    def __init__(self, speech_model: WhisperForConditionalGeneration, speech_processor: WhisperProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, ):
        super().__init__()
        if safety_checker is None:
            logger.warning(
                f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 .")
        self.register_modules(
            speech_model=speech_model, speech_processor=speech_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)
    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(self, audio, sampling_rate: int = 16_000, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(prompt)}""")
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""")
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(callback_steps)}.""")
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"""`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="""
                    f""" {type(prompt)}.""")
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"""`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"""
                    f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    " the batch size of `prompt`.")
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device)
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""")
            latents = latents.to(self.device)
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
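# Self-contained sketch of the classifier-free guidance step used in the loop
# above: noise_pred = uncond + guidance_scale * (text - uncond). The tensor
# shapes below are illustrative, not tied to any real checkpoint.
def _guidance_demo():
    noise_pred = torch.randn(2, 4, 64, 64)  # [uncond, text] stacked on the batch dim
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    guidance_scale = 7.5
    guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    assert guided.shape == (1, 4, 64, 64)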
| 195 | 0 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = '''https://openaipublic.azureedge.net/jukebox/models/'''
MODEL_MAPPING = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def replace_key(key):
    """simple docstring"""
    if key.endswith(""".model.1.bias""") and len(key.split(""".""")) > 10:
        key = key.replace(""".model.1.bias""", """.conv1d_1.bias""")
    elif key.endswith(""".model.1.weight""") and len(key.split(""".""")) > 10:
        key = key.replace(""".model.1.weight""", """.conv1d_1.weight""")
    elif key.endswith(""".model.3.bias""") and len(key.split(""".""")) > 10:
        key = key.replace(""".model.3.bias""", """.conv1d_2.bias""")
    elif key.endswith(""".model.3.weight""") and len(key.split(""".""")) > 10:
        key = key.replace(""".model.3.weight""", """.conv1d_2.weight""")
    if "conditioner_blocks.0." in key:
        key = key.replace("""conditioner_blocks.0""", """conditioner_blocks""")
    if "prime_prior" in key:
        key = key.replace("""prime_prior""", """encoder""")
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(""".emb.""", """.""")
    if key.endswith("""k"""):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(""".k""", """.codebook""")
    if "y_emb." in key:
        return key.replace("""y_emb.""", """metadata_embedding.""")
    if "x_emb.emb." in key:
        key = key.replace("""0.x_emb.emb""", """embed_tokens""")
    if "prime_state_ln" in key:
        return key.replace("""prime_state_ln""", """encoder.final_layer_norm""")
    if ".ln" in key:
        return key.replace(""".ln""", """.layer_norm""")
    if "_ln" in key:
        return key.replace("""_ln""", """_layer_norm""")
    if "prime_state_proj" in key:
        return key.replace("""prime_state_proj""", """encoder.proj_in""")
    if "prime_x_out" in key:
        return key.replace("""prime_x_out""", """encoder.lm_head""")
    if "prior.x_out" in key:
        return key.replace("""x_out""", """fc_proj_out""")
    if "x_emb" in key:
        return key.replace("""x_emb""", """embed_tokens""")
    return key
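# Hedged spot-checks for replace_key; these key strings are illustrative and
# not guaranteed to appear verbatim in a real Jukebox checkpoint.
assert replace_key("prime_state_ln.weight") == "encoder.final_layer_norm.weight"
assert replace_key("decoders.0.x_emb.bias") == "decoders.0.embed_tokens.bias"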
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    """simple docstring"""
    new_dict = {}
    import re
    re_encoder_block_conv_in = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""")
    re_encoder_block_resnet = re.compile(
        r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""")
    re_encoder_block_proj_out = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""")
    re_decoder_block_conv_out = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""")
    re_decoder_block_resnet = re.compile(
        r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""")
    re_decoder_block_proj_in = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""")
    re_prior_cond_conv_out = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""")
    re_prior_cond_resnet = re.compile(
        r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""")
    re_prior_cond_proj_in = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""")
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)
        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)
        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)
        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)
        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)
        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)
        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)
        # keep original key
        else:
            key = original_key
        key = replace_key(key)
        if f'{key_prefix}.{key}' not in model_state_dict or key is None:
            print(f'failed converting {original_key} to {key}, does not match')
        # handle missmatched shape
        elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
            val = model_state_dict[f'{key_prefix}.{key}']
            print(f'{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match')
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """simple docstring"""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/")[-1]}'):
            r = requests.get(f'{PREFIX}{file}', allow_redirects=True)
            os.makedirs(f'{pytorch_dump_folder_path}/', exist_ok=True)
            open(f'{pytorch_dump_folder_path}/{file.split("/")[-1]}', """wb""").write(r.content)
    model_to_convert = MODEL_MAPPING[model_name.split("""/""")[-1]]
    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/")[-1]}')["model"]
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(""".b"""):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(""".w"""):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        key_prefix = "vqvae" if i == 0 else f'priors.{3 - i}'
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)
    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f'{pytorch_dump_folder_path}/mapping.json', """w""") as txtfile:
        json.dump(mapping, txtfile)
    print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
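    # Illustrative invocation (paths and model name are the defaults above):
    #   python convert_jukebox.py --model_name jukebox-5b-lyrics \
    #       --pytorch_dump_folder_path jukebox-5b-lyrics-converted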
| 306 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @slow
    def test_small_integration_test(self):
        """simple docstring"""
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids
        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
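# Hedged numeric sketch of the score above: the model returns the mean
# cross-entropy over label tokens and the test negates it, so higher is better.
# The per-token losses below are made up.
def _score_demo():
    import numpy as np
    token_losses = np.array([2.1, 1.7, 2.4])  # illustrative per-token NLL
    return -token_losses.mean()  # mirrors -tf.math.reduce_mean(loss).numpy()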
| 76 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase__ : Tuple = logging.get_logger(__name__)
class __lowerCAmelCase ( __a ):
"""simple docstring"""
_snake_case : Any = ["""pixel_values"""]
def __init__( self : str , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : float = None , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Union[int, float] = 1 / 255 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase__ : int , ) -> List[Any]:
'''simple docstring'''
super().__init__(**a__ )
_UpperCamelCase = size if size is not None else {'''shortest_edge''': 384}
_UpperCamelCase = get_size_dict(a__ , default_to_square=a__ )
_UpperCamelCase = do_resize
_UpperCamelCase = size
# Default value set here for backwards compatibility where the value in config is None
_UpperCamelCase = crop_pct if crop_pct is not None else 224 / 256
_UpperCamelCase = resample
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case__ ( self : List[str] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : float , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : List[Any] , ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = get_size_dict(a__ , default_to_square=a__ )
if "shortest_edge" not in size:
raise ValueError(f"""Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}""" )
_UpperCamelCase = size['''shortest_edge''']
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
_UpperCamelCase = int(shortest_edge / crop_pct )
_UpperCamelCase = get_resize_output_image_size(a__ , size=a__ , default_to_square=a__ )
_UpperCamelCase = resize(image=a__ , size=a__ , resample=a__ , data_format=a__ , **a__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=a__ , size=(shortest_edge, shortest_edge) , data_format=a__ , **a__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
a__ , size=(shortest_edge, shortest_edge) , resample=a__ , data_format=a__ , **a__ )
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[int, float] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Tuple , ) -> int:
'''simple docstring'''
return rescale(a__ , scale=a__ , data_format=a__ , **a__ )
def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : int , ) -> Dict:
'''simple docstring'''
return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ )
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : ImageInput , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : float = None , lowerCAmelCase__ : PILImageResampling = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : float = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase__ : Optional[int] , ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = crop_pct if crop_pct is not None else self.crop_pct
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(a__ , default_to_square=a__ )
_UpperCamelCase = make_list_of_images(a__ )
if not valid_images(a__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError('''crop_pct must be specified if size < 384.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(a__ ) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=a__ , size=a__ , crop_pct=a__ , resample=a__ ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=a__ , scale=a__ ) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=a__ , mean=a__ , std=a__ ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(a__ , a__ ) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=a__ , tensor_type=a__ )
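# Worked example of the crop_pct rule above with assumed numbers: requesting
# shortest_edge=224 with crop_pct=224/256 first resizes the shortest side to
# int(224 / (224 / 256)) = 256, then center-crops to 224x224.
_shortest_edge, _crop_pct = 224, 224 / 256
assert int(_shortest_edge / _crop_pct) == 256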
| 360 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
lowercase__ : Optional[Any] = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    """simple docstring"""
    def __init__(self, *args, **kwargs) -> None:
        '''simple docstring'''
        warnings.warn(
            '''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use OwlViTImageProcessor instead.''', FutureWarning, )
        super().__init__(*args, **kwargs)
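# Migration sketch implied by the deprecation warning above; the checkpoint id
# is an illustrative assumption, not taken from this file.
#   old: OwlViTFeatureExtractor.from_pretrained("google/owlvit-base-patch32")
#   new: OwlViTImageProcessor.from_pretrained("google/owlvit-base-patch32")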
| 287 | 0 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + '/test_data/fsmt/fsmt_val_data.json'
with io.open(filename, 'r', encoding='utf-8') as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)
    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ] )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
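# Hedged sketch: `calculate_bleu`, imported from the examples' utils above, is
# (to the best of my knowledge) a thin wrapper over sacrebleu along these lines.
# import sacrebleu
# def calculate_bleu(output_lns, refs_lns):
#     return {"bleu": round(sacrebleu.corpus_bleu(output_lns, [refs_lns]).score, 4)}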
| 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A: Optional[Any] = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Optional[int] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: List[str] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
A: Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
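# Hedged illustration of the lazy-import idea behind _LazyModule: heavy
# submodules are only imported when an attribute is first requested. The helper
# below is a minimal stand-in, not the actual _LazyModule implementation.
import importlib
def _lazy_get_demo(module_name: str, attr: str):
    # import the submodule only when the symbol is actually needed
    return getattr(importlib.import_module(module_name), attr)
# e.g. _lazy_get_demo("transformers", "TapasConfig") triggers the import on first use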
| 109 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
class _lowerCamelCase( _a ):
lowercase_ : str = ["""pixel_values"""]
def __init__( self, lowerCamelCase = True, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = PILImageResampling.BILINEAR, lowerCamelCase = True, lowerCamelCase = 1 / 2_55, lowerCamelCase = True, lowerCamelCase = None, lowerCamelCase = None, **lowerCamelCase, ) -> None:
"""simple docstring"""
super().__init__(**lowerCamelCase)
_lowercase : Tuple = size if size is not None else {'shortest_edge': 3_84}
_lowercase : Union[str, Any] = get_size_dict(lowerCamelCase, default_to_square=lowerCamelCase)
_lowercase : Optional[Any] = do_resize
_lowercase : str = size
# Default value set here for backwards compatibility where the value in config is None
_lowercase : List[Any] = crop_pct if crop_pct is not None else 2_24 / 2_56
_lowercase : Tuple = resample
_lowercase : List[str] = do_rescale
_lowercase : Dict = rescale_factor
_lowercase : List[str] = do_normalize
_lowercase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowercase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = PILImageResampling.BICUBIC, lowerCamelCase = None, **lowerCamelCase, ) -> np.ndarray:
"""simple docstring"""
_lowercase : Dict = get_size_dict(lowerCamelCase, default_to_square=lowerCamelCase)
if "shortest_edge" not in size:
raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''')
_lowercase : Tuple = size['shortest_edge']
if shortest_edge < 3_84:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
_lowercase : str = int(shortest_edge / crop_pct)
_lowercase : int = get_resize_output_image_size(lowerCamelCase, size=lowerCamelCase, default_to_square=lowerCamelCase)
_lowercase : List[str] = resize(image=lowerCamelCase, size=lowerCamelCase, resample=lowerCamelCase, data_format=lowerCamelCase, **lowerCamelCase)
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowerCamelCase, size=(shortest_edge, shortest_edge), data_format=lowerCamelCase, **lowerCamelCase)
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowerCamelCase, size=(shortest_edge, shortest_edge), resample=lowerCamelCase, data_format=lowerCamelCase, **lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, **lowerCamelCase, ) -> Any:
"""simple docstring"""
return rescale(lowerCamelCase, scale=lowerCamelCase, data_format=lowerCamelCase, **lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, **lowerCamelCase, ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCamelCase, mean=lowerCamelCase, std=lowerCamelCase, data_format=lowerCamelCase, **lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = ChannelDimension.FIRST, **lowerCamelCase, ) -> PIL.Image.Image:
"""simple docstring"""
_lowercase : Tuple = do_resize if do_resize is not None else self.do_resize
_lowercase : Any = crop_pct if crop_pct is not None else self.crop_pct
_lowercase : List[Any] = resample if resample is not None else self.resample
_lowercase : int = do_rescale if do_rescale is not None else self.do_rescale
_lowercase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : List[str] = image_mean if image_mean is not None else self.image_mean
_lowercase : List[str] = image_std if image_std is not None else self.image_std
_lowercase : Dict = size if size is not None else self.size
_lowercase : Any = get_size_dict(lowerCamelCase, default_to_square=lowerCamelCase)
_lowercase : Dict = make_list_of_images(lowerCamelCase)
if not valid_images(lowerCamelCase):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.')
if do_resize and size["shortest_edge"] < 3_84 and crop_pct is None:
raise ValueError('crop_pct must be specified if size < 384.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
_lowercase : int = [to_numpy_array(lowerCamelCase) for image in images]
if do_resize:
_lowercase : Union[str, Any] = [self.resize(image=lowerCamelCase, size=lowerCamelCase, crop_pct=lowerCamelCase, resample=lowerCamelCase) for image in images]
if do_rescale:
_lowercase : Any = [self.rescale(image=lowerCamelCase, scale=lowerCamelCase) for image in images]
if do_normalize:
_lowercase : Tuple = [self.normalize(image=lowerCamelCase, mean=lowerCamelCase, std=lowerCamelCase) for image in images]
_lowercase : List[str] = [to_channel_dimension_format(lowerCamelCase, lowerCamelCase) for image in images]
_lowercase : str = {'pixel_values': images}
return BatchFeature(data=lowerCamelCase, tensor_type=lowerCamelCase)
| 84 |
import argparse
from collections import defaultdict
import yaml
SCREAMING_SNAKE_CASE : Tuple = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({'local': doc['local'], 'title': doc['title']} )
        else:
            new_doc_list.append(doc )
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                F'''{duplicate_key} is present several times in the documentation table of content at '''
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.' )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if 'local' not in doc or counts[doc['local']] == 1] )
    new_doc = sorted(new_doc , key=lambda s : s["title"].lower() )
    # "overview" gets special treatment and is always first
    if len(overview_doc ) > 1:
        raise ValueError('The toctree contains two \'overview\' docs, which is not allowed.' )
    overview_doc.extend(new_doc )
    # Sort
    return overview_doc
def UpperCamelCase_( lowerCamelCase_=False ) -> Any:
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
_lowercase : Optional[Any] = yaml.safe_load(f.read() )
# Get to the API doc
_lowercase : Tuple = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowercase : Tuple = content[api_idx]['sections']
# Then to the model doc
_lowercase : int = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
_lowercase : Optional[int] = api_doc[scheduler_idx]['sections']
_lowercase : List[Any] = clean_doc_toc(lowerCamelCase_ )
_lowercase : Optional[Any] = False
if new_scheduler_doc != scheduler_doc:
_lowercase : Optional[Any] = True
if overwrite:
_lowercase : str = new_scheduler_doc
if diff:
if overwrite:
_lowercase : Any = api_doc
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(lowerCamelCase_ , allow_unicode=lowerCamelCase_ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def UpperCamelCase_( lowerCamelCase_=False ) -> List[str]:
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
_lowercase : int = yaml.safe_load(f.read() )
# Get to the API doc
_lowercase : Optional[Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowercase : str = content[api_idx]['sections']
# Then to the model doc
_lowercase : Tuple = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
_lowercase : Tuple = False
_lowercase : Dict = api_doc[pipeline_idx]['sections']
_lowercase : Optional[Any] = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
_lowercase : Union[str, Any] = pipeline_doc['section']
_lowercase : List[str] = clean_doc_toc(lowerCamelCase_ )
if overwrite:
_lowercase : str = new_sub_pipeline_doc
new_pipeline_docs.append(lowerCamelCase_ )
# sort overall pipeline doc
_lowercase : int = clean_doc_toc(lowerCamelCase_ )
if new_pipeline_docs != pipeline_docs:
_lowercase : Tuple = True
if overwrite:
_lowercase : str = new_pipeline_docs
if diff:
if overwrite:
_lowercase : List[str] = api_doc
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(lowerCamelCase_ , allow_unicode=lowerCamelCase_ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
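# Tiny worked example of the de-duplication rule in clean_doc_toc (entries are
# made up): the "overview" entry is pulled to the front and the duplicate
# "ddim" entries collapse into one.
_toy_toc = [
    {"local": "overview", "title": "Overview"},
    {"local": "ddim", "title": "DDIM"},
    {"local": "ddim", "title": "DDIM"},
]
assert clean_doc_toc(_toy_toc)[0]["title"] == "Overview"
assert len(clean_doc_toc(_toy_toc)) == 2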
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 84 | 1 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
lowercase_ = "examples/"
REPLACE_PATTERNS = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
REPLACE_FILES = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
lowercase_ = "README.md"
def update_version_in_file(fname, version, pattern):
    """simple docstring"""
    with open(fname, '''r''', encoding='''utf-8''', newline='''\n''') as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('''VERSION''', version)
    code = re_pattern.sub(replace, code)
    with open(fname, '''w''', encoding='''utf-8''', newline='''\n''') as f:
        f.write(code)
def update_version_in_examples(version):
    """simple docstring"""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('''research_projects''')
        if "legacy" in directories:
            directories.remove('''legacy''')
        for fname in fnames:
            if fname.endswith('''.py'''):
                update_version_in_file(os.path.join(folder, fname), version, pattern='''examples''')
def global_version_update(version, patch=False):
    """simple docstring"""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """simple docstring"""
    _start_prompt = '''🤗 Transformers currently provides the following architectures'''
    _end_prompt = '''1. Want to contribute a new model?'''
    with open(README_FILE, '''r''', encoding='''utf-8''', newline='''\n''') as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith('''1.'''):
            lines[index] = lines[index].replace(
                '''https://huggingface.co/docs/transformers/main/model_doc''', '''https://huggingface.co/docs/transformers/model_doc''', )
        index += 1
    with open(README_FILE, '''w''', encoding='''utf-8''', newline='''\n''') as f:
        f.writelines(lines)
def get_version():
    """simple docstring"""
    with open(REPLACE_FILES['''init'''], '''r''') as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['''init'''][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """simple docstring"""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''')
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        default_version = F'''{default_version.major}.{default_version.minor + 1}.0'''
    # Now let's ask nicely if that's the right one.
    version = input(F'''Which version are you releasing? [{default_version}]''')
    if len(version) == 0:
        version = default_version
    print(F'''Updating version to {version}.''')
    global_version_update(version, patch=patch)
    if not patch:
        print('''Cleaning main README, don\'t forget to run `make fix-copies`.''')
        clean_main_ref_in_model_list()
def post_release_work():
    """simple docstring"""
    current_version = get_version()
    dev_version = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F'''Which version are we developing now? [{dev_version}]''')
    if len(version) == 0:
        version = dev_version
    print(F'''Updating version to {version}.''')
    global_version_update(version)
    print('''Cleaning main README, don\'t forget to run `make fix-copies`.''')
    clean_main_ref_in_model_list()
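# Worked example of the "init" pattern above (toy version numbers): running
#   REPLACE_PATTERNS["init"][0].sub('__version__ = "4.31.0"\n', '__version__ = "4.31.0.dev0"\n')
# rewrites the dev version line to '__version__ = "4.31.0"'.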
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 211 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
lowercase_ = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
lowercase_ = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
lowercase_ = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False, ):
    """simple docstring"""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False, ):
    """simple docstring"""
    total_area_intersect = np.zeros((num_labels,), dtype=np.floataa if False else np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False, ):
    """simple docstring"""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels)
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
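# Hand-checked example of intersect_and_union above (tiny made-up maps):
# pred=[[1,1],[0,1]] vs label=[[1,1],[1,1]] gives, for class 1, intersect=3 and
# union=4, i.e. an IoU of 0.75.
_demo_intersect, _demo_union, _, _ = intersect_and_union(
    np.array([[1, 1], [0, 1]]), np.array([[1, 1], [1, 1]]), num_labels=2, ignore_index=255)
assert _demo_intersect[1] == 3 and _demo_union[1] == 4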
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MeanIoU(datasets.Metric):
    '''simple docstring'''
    def _info(self):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    '''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
                    '''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
                } ), reference_urls=[
                '''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
            ], )
    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False, ):
        """simple docstring"""
        iou_result = mean_iou(
            results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index, nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels, )
        return iou_result
| 211 | 1 |
import datasets
UpperCamelCase = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
UpperCamelCase = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
UpperCamelCase = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def simple_accuracy(preds, labels):
    """simple docstring"""
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Xnli(datasets.Metric):
"""simple docstring"""
def a ( self : List[str] ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 221 |
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
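# Worked example: 12 = 2 * 2 * 3 has three prime factors counted with
# multiplicity (odd), so liouville_lambda(12) == -1, while
# liouville_lambda(10) == 1 since 10 = 2 * 5 (even).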
if __name__ == "__main__":
import doctest
doctest.testmod()
| 221 | 1 |
def simplify(current_set: list[list]) -> list[list]:
    """simple docstring"""
    # Divide each row by magnitude of first term --> creates 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list]) -> list:
    """simple docstring"""
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
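    # Worked expectation (derived by hand): the coefficient matrix above is the
    # identity plus the all-ones matrix, so x_i = b_i - sum(x) with sum(x) = 5;
    # the first call should print [-1.0, 0.0, 1.0, 2.0, 3.0], the second [0.5].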
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 205 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """simple docstring"""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
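# Example: maximum_non_adjacent_sum([1, 2, 4, 6]) == 8, taking the
# non-adjacent elements 2 and 6.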
if __name__ == "__main__":
import doctest
doctest.testmod()
| 205 | 1 |
"""simple docstring"""
# number of characters in the alphabet, which we use as the base
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """simple docstring"""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    """simple docstring"""
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")
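
# Rolling-hash arithmetic in miniature (an illustration, base 10 instead of
# alphabet_size for readability): over the text "123", the hash of "23" follows
# from the hash of "12" as (12 - 1 * 10) * 10 + 3 == 23 -- drop the leading
# digit, shift, append the new one. rabin_karp() does exactly this modulo
# `modulus` in base `alphabet_size`.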
if __name__ == "__main__":
test_rabin_karp()
| 358 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self):
        """simple docstring"""
        return 32

    @property
    def time_input_dim(self):
        """simple docstring"""
        return 32

    @property
    def time_embed_dim(self):
        """simple docstring"""
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        """simple docstring"""
        return 8

    @property
    def dummy_image_encoder(self):
        """simple docstring"""
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        """simple docstring"""
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor
    @property
    def dummy_prior(self):
        """simple docstring"""
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        """simple docstring"""
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        """simple docstring"""
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        """simple docstring"""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        """simple docstring"""
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        """simple docstring"""
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        """simple docstring"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        """simple docstring"""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        """simple docstring"""
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 290 | 0 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """
    Descriptor that mimics @property but caches output in member variable.
    """

    def __get__(self, obj, objtype=None):
        """simple docstring"""
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """simple docstring"""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
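# Example: strtobool("YES") == 1 and strtobool("off") == 0; any other string
# raises ValueError.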
def is_tensor(x):
    """
    Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jaxlib.xla_extension.DeviceArray` or `np.ndarray`.
    """
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True
    return isinstance(x, np.ndarray)
def _is_numpy(x):
    """simple docstring"""
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """simple docstring"""
    return _is_numpy(x)


def _is_torch(x):
    """simple docstring"""
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """simple docstring"""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    """simple docstring"""
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """simple docstring"""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    """simple docstring"""
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """simple docstring"""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    """simple docstring"""
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """simple docstring"""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    """simple docstring"""
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """simple docstring"""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    """simple docstring"""
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """simple docstring"""
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """
    Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list.
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """
    Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
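# Example: to_py_obj({"a": np.array([1, 2])}) == {"a": [1, 2]}, whichever
# framework produced the tensors; to_numpy normalizes the same inputs to
# np.ndarray instead.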
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like
    a tuple) or strings (like a dictionary) that will ignore the `None` attributes.
    """

    def __post_init__(self):
        """simple docstring"""
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value).")
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        """simple docstring"""
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        """simple docstring"""
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        """simple docstring"""
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        """simple docstring"""
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        """simple docstring"""
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        """simple docstring"""
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        """simple docstring"""
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """simple docstring"""
        return tuple(self[k] for k in self.keys())
class ExplicitEnum(str, Enum):
    """
    Enum with more explicit error message for missing values.
    """

    @classmethod
    def _missing_(cls, value):
        """simple docstring"""
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}")


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """
    Wrapper for `contextlib.ExitStack` which enters a collection of context managers.
    """

    def __init__(self, context_managers: List[ContextManager]):
        """simple docstring"""
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        """simple docstring"""
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        """simple docstring"""
        self.stack.__exit__(*args, **kwargs)
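# Illustrative usage (a sketch): enter several context managers as one unit.
#
#   with ContextManagers([tempfile.TemporaryDirectory(), tempfile.TemporaryDirectory()]):
#       ...  # both temporary directories exist here and are cleaned up together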
def can_return_loss(model_class):
    """
    Check if a given model can return loss.
    """
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False


def find_labels(model_class):
    """
    Find the labels used by a given model.
    """
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single level dict"""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
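# Example: flatten_dict({"a": {"b": 1}, "c": 2}) == {"a.b": 1, "c": 2}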
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    """simple docstring"""
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """
    Framework-agnostic version of `numpy.transpose` that works on torch/TensorFlow/Jax tensors as well as NumPy arrays.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """
    Framework-agnostic version of `numpy.reshape`.
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """
    Framework-agnostic version of `numpy.squeeze`.
    """
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """
    Framework-agnostic version of `numpy.expand_dims`.
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """
    Framework-agnostic version of `numpy.size`.
    """
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
def add_model_info_to_auto_map(auto_map, repo_id):
    """
    Adds the information of the repo_id to a given auto map.
    """
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map


def infer_framework(model_class):
    """
    Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant
    classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
| 75 |
def binary_insertion_sort(collection: list) -> list:
    '''simple docstring'''
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
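    # Example run: entering "4,1,3,2" prints [1, 2, 3, 4]. The binary search only
    # reduces comparisons; the shifting loop keeps the worst case at O(n^2).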
| 222 | 0 |
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8


def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits_tensor = ((x & mask) != 0).float()
    bits_tensor = rearrange(bits_tensor, "b c d h w -> b (c d) h w")
    bits_tensor = bits_tensor * 2 - 1
    return bits_tensor


def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
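
# Illustrative round-trip check (my own sketch, not part of the original
# pipeline): encoding then decoding reproduces the input up to 8-bit
# quantization, i.e. bits_to_decimal(decimal_to_bits(x)) equals
# (x * 255).int() / 255 for x in [0, 1].
#
#   x = torch.rand(2, 3, 16, 16)
#   assert torch.allclose(bits_to_decimal(decimal_to_bits(x)), (x * 255).int() / 255)
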
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """simple docstring"""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    """simple docstring"""
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        self.scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator=None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
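
# Illustrative usage (a sketch; the UNet below is a placeholder, not a
# published checkpoint -- it must have been trained on bit-encoded images):
#
#   unet = UNet2DConditionModel(...)
#   pipe = BitDiffusion(unet, DDIMScheduler(), bit_scale=1.0)
#   image = pipe(height=64, width=64, num_inference_steps=50).images[0]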
| 350 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
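
# Illustrative usage (a sketch): the auto classes resolve the concrete Flax
# architecture from a checkpoint's config.
#
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")
#   lm = FlaxAutoModelForMaskedLM.from_pretrained("roberta-base")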
| 216 | 0 |
"""simple docstring"""
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
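# Both demo calls below bracket the positive root of 10 - x^2, so each should
# converge to roughly sqrt(10) ~= 3.16 (within the 0.01 tolerance).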
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 126 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        """simple docstring"""
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        """simple docstring"""
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        """simple docstring"""
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
def UpperCAmelCase__ ( self :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] ):
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, )
                inputs_dict['input_ids'] = inputs_dict['labels']
                inputs_dict['token_type_ids'] = inputs_dict['labels']
                inputs_dict['mc_token_ids'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, )
                inputs_dict['mc_labels'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        """simple docstring"""
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_lm_generate_openai_gpt(self):
        """simple docstring"""
        model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4_735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4_735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40_477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 126 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 81 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """simple docstring"""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """simple docstring"""

    order = 1
@register_to_config
def __init__( self , lowerCAmelCase_ = 20_00 , lowerCAmelCase_ = 0.15 , lowerCAmelCase_ = 0.01 , lowerCAmelCase_ = 1348.0 , lowerCAmelCase_ = 1E-5 , lowerCAmelCase_ = 1 , ) -> Tuple:
# standard deviation of the initial noise distribution
_A = sigma_max
# setable values
_A = None
self.set_sigmas(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> torch.FloatTensor:
return sample
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> Tuple:
_A = sampling_eps if sampling_eps is not None else self.config.sampling_eps
_A = torch.linspace(1 , lowerCAmelCase_ , lowerCAmelCase_ , device=lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> Any:
_A = sigma_min if sigma_min is not None else self.config.sigma_min
_A = sigma_max if sigma_max is not None else self.config.sigma_max
_A = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCAmelCase_ , lowerCAmelCase_ )
_A = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
_A = torch.exp(torch.linspace(math.log(lowerCAmelCase_ ) , math.log(lowerCAmelCase_ ) , lowerCAmelCase_ ) )
_A = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = True , ) -> Union[SdeVeOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
_A = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
_A = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
_A = timesteps.to(self.discrete_sigmas.device )
_A = self.discrete_sigmas[timesteps].to(sample.device )
_A = self.get_adjacent_sigma(lowerCAmelCase_ , lowerCAmelCase_ ).to(sample.device )
_A = torch.zeros_like(lowerCAmelCase_ )
_A = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
_A = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
_A = diffusion.unsqueeze(-1 )
_A = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
_A = randn_tensor(
sample.shape , layout=sample.layout , generator=lowerCAmelCase_ , device=sample.device , dtype=sample.dtype )
_A = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_A = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCAmelCase_ , prev_sample_mean=lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = True , ) -> Union[SchedulerOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
_A = randn_tensor(sample.shape , layout=sample.layout , generator=lowerCAmelCase_ ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
_A = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
_A = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
_A = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
_A = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
_A = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
_A = step_size.unsqueeze(-1 )
_A = sample + step_size * model_output
_A = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_A = timesteps.to(original_samples.device )
_A = self.discrete_sigmas.to(original_samples.device )[timesteps]
_A = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCAmelCase_ ) * sigmas[:, None, None, None]
)
_A = noise + original_samples
return noisy_samples
def __len__( self ) -> List[str]:
return self.config.num_train_timesteps
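
# A minimal, commented sketch (not part of this file) of the predictor-corrector
# sampling loop this scheduler is built for. The method names `set_timesteps`,
# `set_sigmas`, `step_correct` and `step_pred` follow the upstream diffusers
# `ScoreSdeVeScheduler` API that this class mirrors; `score_model` stands in for
# a hypothetical score network predicting grad_x log p_t(x).
#
# scheduler = ScoreSdeVeScheduler()
# scheduler.set_timesteps(num_inference_steps=100)
# scheduler.set_sigmas(num_inference_steps=100)
# sample = torch.randn(1, 3, 32, 32) * scheduler.config.sigma_max
# for t in scheduler.timesteps:
#     model_output = score_model(sample, t)
#     sample = scheduler.step_correct(model_output, sample).prev_sample   # corrector
#     model_output = score_model(sample, t)
#     sample = scheduler.step_pred(model_output, t, sample).prev_sample  # predictor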
| 81 | 1 |
"""simple docstring"""
from math import ceil
def assert_device_map(device_map, num_blocks) -> None:
    '''simple docstring'''
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            'Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'
            ' These attention blocks were specified more than once: ' + str(duplicate_blocks))
    if len(missing_blocks) != 0:
        raise ValueError(
            'There are attention blocks for this model that are not specified in the device_map. Add these attention '
            'blocks to a device on the device_map: ' + str(missing_blocks))
    if len(extra_blocks) != 0:
        raise ValueError(
            'The device_map contains more attention blocks than this model has. Remove these from the device_map:'
            + str(extra_blocks))


def get_device_map(n_layers, devices) -> dict:
    '''simple docstring'''
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
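
# Quick commented demonstration of the splitter above: 8 layers spread over the
# devices [0, 1, 3] become contiguous blocks of ceil(8 / 3) = 3 layers each.
#
# >>> get_device_map(8, [0, 1, 3])
# {0: [0, 1, 2], 1: [3, 4, 5], 3: [6, 7]}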
| 255 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase: List[Any] = logging.get_logger(__name__)
_UpperCamelCase: int = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = 'megatron-bert'

    def __init__(self, vocab_size=29056, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
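
# Commented example of the config above: one field overridden, the rest falling
# back to the defaults declared in __init__.
#
# config = MegatronBertConfig(num_hidden_layers=12)
# assert config.hidden_size == 1024 and config.num_hidden_layers == 12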
| 255 | 1 |
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r'([A-Z]+)([A-Z][a-z])')
_lowercase_uppercase_re = re.compile(r'([a-z\d])([A-Z])')
_single_underscore_re = re.compile(r'(?<!_)_(?!_)')
_multiple_underscores_re = re.compile(r'(_{2,})')
_split_re = r'^\w+(\.\w+)*$'
INVALID_WINDOWS_CHARACTERS_IN_PATH = r'<>:/\|?*'


def camelcase_to_snakecase(name):
    '''simple docstring'''
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    '''simple docstring'''
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    '''simple docstring'''
    if os.path.basename(name) != name:
        raise ValueError(f'Should be a dataset name, not a path: {name}')
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    '''simple docstring'''
    if os.path.basename(name) != name:
        raise ValueError(f'Should be a dataset name, not a path: {name}')
    if not re.match(_split_re, split):
        raise ValueError(f'Split name should match \'{_split_re}\'\' but got \'{split}\'.')
    return f'{filename_prefix_for_name(name)}-{split}'


def filepattern_for_dataset_split(path, dataset_name, split, filetype_suffix=None):
    '''simple docstring'''
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f'.{filetype_suffix}'
    filepath = os.path.join(path, prefix)
    return f'{filepath}*'


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    '''simple docstring'''
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f'{prefix}-{shard_id:05d}-of-{num_shards:05d}' for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f'.{filetype_suffix}' for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f'.{filetype_suffix}'
        return [filename]
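
# A few commented examples of the helpers above (POSIX path separators assumed):
#
# >>> camelcase_to_snakecase("SquadV2")
# 'squad_v2'
# >>> snakecase_to_camelcase("squad_v2")
# 'SquadV2'
# >>> filenames_for_dataset_split("/data", "squad", "train", filetype_suffix="arrow", shard_lengths=[100, 100])
# ['/data/squad-train-00000-of-00002.arrow', '/data/squad-train-00001-of-00002.arrow']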
| 29 |
def partition(m: int) -> int:
    '''simple docstring'''
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
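
# Commented example: partition(5) counts the ways to write 5 as a sum of at
# least two positive integers (p(5) - 1 = 6):
# 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1
#
# >>> partition(5)
# 6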
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 29 | 1 |
"""simple docstring"""
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    '''simple docstring'''
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 115 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = torch.device('cpu')
def prepare_img():
    '''simple docstring'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
'''simple docstring'''
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] )
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    '''simple docstring'''
    rename_keys = []
    for k in state_dict.keys():
        k_new = k

        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    '''simple docstring'''
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {swiftformer_name} to {pytorch_dump_folder_path}')
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
UpperCAmelCase : Tuple = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
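
# Example shell invocation (the script file name and checkpoint path are
# illustrative; the flags come from the argument parser above):
#
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt /path/to/swiftformer_xs.pth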
| 115 | 1 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, act_dim=6, state_dim=17, hidden_size=23, max_length=11, is_training=True):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        """simple docstring"""
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        """simple docstring"""
        return DecisionTransformerConfig(
            batch_size=self.batch_size, seq_length=self.seq_length, act_dim=self.act_dim, state_dim=self.state_dim, hidden_size=self.hidden_size, max_length=self.max_length, )
    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        """simple docstring"""
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size))  # seq length * 3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            'states': states,
            'actions': actions,
            'rewards': rewards,
            'returns_to_go': returns_to_go,
            'timesteps': timesteps,
            'attention_mask': attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                'states',
                'actions',
                'rewards',
                'returns_to_go',
                'timesteps',
                'attention_mask',
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """simple docstring"""
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]], device=torch_device)

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)

            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states, actions=actions, rewards=rewards, returns_to_go=returns_to_go, timesteps=timesteps, attention_mask=attention_mask, return_dict=False, )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1)
| 356 |
import comet # From: unbabel-comet
import torch
import datasets
_A = datasets.logging.get_logger(__name__)
_A = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_A = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_A = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://unbabel.github.io/COMET/html/index.html""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""sources""": datasets.Value("""string""" , id="""sequence""" ),
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/Unbabel/COMET"""] , reference_urls=[
"""https://github.com/Unbabel/COMET""",
"""https://www.aclweb.org/anthology/2020.emnlp-main.213/""",
"""http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6""",
] , )
    def _download_and_prepare(self, dl_manager):
        """simple docstring"""
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("""wmt20-comet-da""" ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        """simple docstring"""
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"""src""": sources, """mt""": predictions, """ref""": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 261 | 0 |
from random import randint, random
def construct_highway(number_of_cells, frequency, initial_speed, random_frequency=False, random_speed=False, max_speed=5, ) -> list:
    """simple docstring"""

    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now, car_index) -> int:
    """simple docstring"""

    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now, probability, max_speed) -> list:
    """simple docstring"""

    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cell before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway, number_of_update, probability, max_speed) -> list:
    """simple docstring"""

    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)

    return highway
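
# Commented usage example: build a 100-cell highway with a car every 4 cells at
# speed 1, then run 50 update steps with a 10% chance of random slowdown.
#
# highway = construct_highway(100, frequency=4, initial_speed=1)
# history = simulate(highway, number_of_update=50, probability=0.1, max_speed=5)
# len(history)  # 51 snapshots: the initial state plus one per update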
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 |
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    '''simple docstring'''
    queue = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 141 | 0 |
def solution(n: int = 1_000_000) -> int:
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, n):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
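
# Commented check on a small bound: below 10 the longest Collatz chain starts
# at 9 (9 -> 28 -> 14 -> 7 -> ... -> 1, twenty terms in total).
#
# >>> solution(10)
# 9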
if __name__ == "__main__":
print(solution(int(input().strip())))
| 327 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
a_ : Tuple = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
a_ : List[Any] = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
a_ : List[str] = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False) -> Optional[Any]:
        if rouge_types is None:
            rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 327 | 1 |
'''simple docstring'''
lowercase_ = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
lowercase_ = ["a", "b", "c", "d", "e"]
def topological_sort(start: str, visited: list, sort: list) -> list:
    """simple docstring"""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
lowercase_ = topological_sort("a", [], [])
print(sort)
| 211 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'gptj'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(self, vocab_size=50_400, n_positions=2_048, n_embd=4_096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50_256, eos_token_id=50_256, tie_word_embeddings=False, **kwargs, ) -> None:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class GPTJOnnxConfig(OnnxConfigWithPast):
    '''simple docstring'''
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False, ) -> None:
        """simple docstring"""
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, 'pad_token_id', None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def num_layers(self) -> int:
        """simple docstring"""
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        """simple docstring"""
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        """simple docstring"""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        """simple docstring"""
        return 13
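
# Hedged usage sketch (commented): building dummy ONNX export inputs with the
# classes above. The tokenizer checkpoint and the small config overrides are
# illustrative only.
#
# from transformers import AutoTokenizer
# config = GPTJConfig(n_layer=4, n_head=4, n_embd=128)
# onnx_config = GPTJOnnxConfig(config, task="default", use_past=True)
# tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
# dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
# list(dummy)  # ['input_ids', 'past_key_values', 'attention_mask']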
| 211 | 1 |
import copy
import re
class TrialShortNamer:
    '''simple docstring'''
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        """simple docstring"""
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        """simple docstring"""
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        """simple docstring"""
        words = param_name.split("_")

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        """simple docstring"""
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        """simple docstring"""
        if cls.NAMING_INFO is not None:
            return

        info = {
            '''short_word''': {},
            '''reverse_short_word''': {},
            '''short_param''': {},
            '''reverse_short_param''': {},
        }

        field_keys = list(cls.DEFAULTS.keys())

        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        """simple docstring"""
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"""You should provide a default value for the param name {k} with value {v}""")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO['''short_param'''][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = '''''' if isinstance(v, (int, float)) else '''-'''
            e = f"""{key}{sep}{v}"""
            name.append(e)

        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        """simple docstring"""
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split('''_''')

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split('''-''')
            else:
                p_k = re.sub('''[0-9.]''', '''''', value)
                p_v = float(re.sub('''[^0-9.]''', '''''', value))

            key = cls.NAMING_INFO['''reverse_short_param'''][p_k]

            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
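
# Hedged usage example (commented): subclass with a prefix and defaults, then
# round-trip a param dict through shortname()/parse_repr(). Names and values
# here are illustrative.
#
# class RunNamer(TrialShortNamer):
#     PREFIX = "run"
#     DEFAULTS = {"learning_rate": 0.0001, "warmup": False}
#
# name = RunNamer.shortname({"learning_rate": 0.0003, "warmup": True})
# name                  # 'run_lr0.0003_w1'
# RunNamer.parse_repr(name)  # {'learning_rate': 0.0003, 'warmup': 1.0}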
| 315 |
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog", ) -> bool:
    """simple docstring"""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(' ', '')
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog", ) -> bool:
    """simple docstring"""
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True
        elif char.isupper():
            flag[ord(char) - 65] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog", ) -> bool:
    """simple docstring"""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """simple docstring"""
    from timeit import timeit

    setup = 'from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'
    print(timeit('is_pangram()', setup=setup))
    print(timeit('is_pangram_faster()', setup=setup))
    print(timeit('is_pangram_fastest()', setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
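
# Commented sanity checks for the three variants above:
#
# >>> is_pangram()
# True
# >>> is_pangram("My name is Unknown")
# False
# >>> is_pangram_fastest("Jived fox nymph grabs quick waltz.")
# True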
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 315 | 1 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    """simple docstring"""
    with open(vocab_file, 'r') as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int):
        """simple docstring"""
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str):
        """simple docstring"""
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        """simple docstring"""
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        """simple docstring"""
        return len(self._id_to_token)

    def get_vocab(self):
        """simple docstring"""
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str):
        """simple docstring"""
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int):
        """simple docstring"""
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!')
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.')
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        """simple docstring"""
        vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt')
        with open(vocab_file, 'w') as f:
            f.write('\n'.join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self):
        """simple docstring"""
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False):
        """simple docstring"""
        return super()._add_tokens(new_tokens, special_tokens=True)
| 51 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
snake_case_ : str = 0
snake_case_ : Union[str, Any] = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
snake_case_ : str = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
snake_case_ : List[Any] = tuple[int, int]
class __snake_case :
def __init__( self : Any , _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : Node | None , ):
"""simple docstring"""
UpperCAmelCase_ = pos_x
UpperCAmelCase_ = pos_y
UpperCAmelCase_ = (pos_y, pos_x)
UpperCAmelCase_ = goal_x
UpperCAmelCase_ = goal_y
UpperCAmelCase_ = g_cost
UpperCAmelCase_ = parent
UpperCAmelCase_ = self.calculate_heuristic()
UpperCAmelCase_ = self.g_cost + self.h_cost
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = self.pos_x - self.goal_x
UpperCAmelCase_ = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(_snake_case) + abs(_snake_case)
else:
return sqrt(dy**2 + dx**2)
def __lt__( self : Union[str, Any] , _snake_case : Node):
"""simple docstring"""
return self.f_cost < other.f_cost
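# Added standalone sketch of the two heuristics computed by Node above:
# Manhattan distance when HEURISTIC == 1, Euclidean distance otherwise.
def manhattan_distance(dx , dy ):
    return abs(dx ) + abs(dy )
def euclidean_distance(dx , dy ):
    return (dx * dx + dy * dy) ** 0.5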
class __snake_case :
def __init__( self : str , _snake_case : TPosition , _snake_case : TPosition):
"""simple docstring"""
UpperCAmelCase_ = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _snake_case)
UpperCAmelCase_ = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , _snake_case)
UpperCAmelCase_ = [self.start]
UpperCAmelCase_ = []
UpperCAmelCase_ = False
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
UpperCAmelCase_ = self.open_nodes.pop(0)
if current_node.pos == self.target.pos:
return self.retrace_path(_snake_case)
self.closed_nodes.append(_snake_case)
UpperCAmelCase_ = self.get_successors(_snake_case)
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_snake_case)
else:
# retrieve the best current path
UpperCAmelCase_ = self.open_nodes.pop(self.open_nodes.index(_snake_case))
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_snake_case)
else:
self.open_nodes.append(_snake_case)
return [self.start.pos]
def lowerCamelCase ( self : Tuple , _snake_case : Node):
"""simple docstring"""
UpperCAmelCase_ = []
for action in delta:
UpperCAmelCase_ = parent.pos_x + action[1]
UpperCAmelCase_ = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(_snake_case) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_snake_case , _snake_case , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _snake_case , ))
return successors
def lowerCamelCase ( self : Any , _snake_case : Node | None):
"""simple docstring"""
UpperCAmelCase_ = node
UpperCAmelCase_ = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x))
UpperCAmelCase_ = current_node.parent
path.reverse()
return path
class __snake_case :
def __init__( self : Any , _snake_case : TPosition , _snake_case : TPosition):
"""simple docstring"""
UpperCAmelCase_ = AStar(_snake_case , _snake_case)
UpperCAmelCase_ = AStar(_snake_case , _snake_case)
UpperCAmelCase_ = False
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
UpperCAmelCase_ = self.fwd_astar.open_nodes.pop(0)
UpperCAmelCase_ = self.bwd_astar.open_nodes.pop(0)
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
_snake_case , _snake_case)
self.fwd_astar.closed_nodes.append(_snake_case)
self.bwd_astar.closed_nodes.append(_snake_case)
UpperCAmelCase_ = current_bwd_node
UpperCAmelCase_ = current_fwd_node
UpperCAmelCase_ = {
self.fwd_astar: self.fwd_astar.get_successors(_snake_case),
self.bwd_astar: self.bwd_astar.get_successors(_snake_case),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(_snake_case)
else:
# retrieve the best current path
UpperCAmelCase_ = astar.open_nodes.pop(
astar.open_nodes.index(_snake_case))
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(_snake_case)
else:
astar.open_nodes.append(_snake_case)
return [self.fwd_astar.start.pos]
def lowerCamelCase ( self : int , _snake_case : Node , _snake_case : Node):
"""simple docstring"""
UpperCAmelCase_ = self.fwd_astar.retrace_path(_snake_case)
UpperCAmelCase_ = self.bwd_astar.retrace_path(_snake_case)
bwd_path.pop()
bwd_path.reverse()
UpperCAmelCase_ = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
snake_case_ : Any = (0, 0)
snake_case_ : Union[str, Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
snake_case_ : str = time.time()
snake_case_ : List[str] = AStar(init, goal)
snake_case_ : Optional[int] = a_star.search()
snake_case_ : Optional[Any] = time.time() - start_time
print(f"AStar execution time = {end_time:f} seconds")
snake_case_ : int = time.time()
    snake_case_ : Dict = BidirectionalAStar(init, goal)
    snake_case_ : Optional[int] = bidir_astar.search()
    snake_case_ : str = time.time() - bd_start_time
print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 51 | 1 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __UpperCAmelCase ( unittest.TestCase ):
@property
def __magic_name__ ( self : str ):
torch.manual_seed(0 )
UpperCAmelCase : List[Any] = UNetaDModel(
block_out_channels=(3_2, 6_4), layers_per_block=2, sample_size=3_2, in_channels=3, out_channels=3, down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''), up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''), )
return model
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : Any = self.dummy_uncond_unet
UpperCAmelCase : Any = PNDMScheduler()
UpperCAmelCase : List[str] = PNDMPipeline(unet=__A, scheduler=__A )
pndm.to(__A )
pndm.set_progress_bar_config(disable=__A )
UpperCAmelCase : str = torch.manual_seed(0 )
UpperCAmelCase : Optional[Any] = pndm(generator=__A, num_inference_steps=2_0, output_type='''numpy''' ).images
UpperCAmelCase : Tuple = torch.manual_seed(0 )
UpperCAmelCase : Optional[Any] = pndm(generator=__A, num_inference_steps=2_0, output_type='''numpy''', return_dict=__A )[0]
UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
UpperCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
UpperCAmelCase : Dict = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
def __magic_name__ ( self : str ):
UpperCAmelCase : Optional[int] = '''google/ddpm-cifar10-32'''
UpperCAmelCase : Optional[int] = UNetaDModel.from_pretrained(__A )
UpperCAmelCase : Union[str, Any] = PNDMScheduler()
UpperCAmelCase : List[Any] = PNDMPipeline(unet=__A, scheduler=__A )
pndm.to(__A )
pndm.set_progress_bar_config(disable=__A )
UpperCAmelCase : Dict = torch.manual_seed(0 )
UpperCAmelCase : Dict = pndm(generator=__A, output_type='''numpy''' ).images
UpperCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
UpperCAmelCase : int = np.array([0.1_5_6_4, 0.1_4_6_4_5, 0.1_4_0_6, 0.1_4_7_1_5, 0.1_2_4_2_5, 0.1_4_0_4_5, 0.1_3_1_1_5, 0.1_2_1_7_5, 0.1_2_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 365 |
import logging
import os
from .state import PartialState
class __UpperCAmelCase ( logging.LoggerAdapter ):
@staticmethod
def __magic_name__ ( __A : str ):
UpperCAmelCase : Dict = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def __magic_name__ ( self : Union[str, Any], __A : Union[str, Any], __A : Union[str, Any], *__A : Optional[int], **__A : Tuple ):
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
UpperCAmelCase : List[str] = kwargs.pop('''main_process_only''', __A )
UpperCAmelCase : int = kwargs.pop('''in_order''', __A )
if self.isEnabledFor(__A ):
if self._should_log(__A ):
UpperCAmelCase , UpperCAmelCase : Dict = self.process(__A, __A )
self.logger.log(__A, __A, *__A, **__A )
elif in_order:
UpperCAmelCase : Dict = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
UpperCAmelCase , UpperCAmelCase : Optional[int] = self.process(__A, __A )
self.logger.log(__A, __A, *__A, **__A )
state.wait_for_everyone()
def a__ ( UpperCAmelCase : str , UpperCAmelCase : str = None ) -> Dict:
if log_level is None:
UpperCAmelCase : Union[str, Any] = os.environ.get('''ACCELERATE_LOG_LEVEL''' , UpperCAmelCase )
UpperCAmelCase : Tuple = logging.getLogger(UpperCAmelCase )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(UpperCAmelCase , {} )
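# Added usage sketch (hedged): upstream this helper is exported as
# `get_logger`, and the accelerate state must be initialized first, e.g.
#
#     from accelerate import Accelerator
#     accelerator = Accelerator()
#     logger = get_logger(__name__, log_level="INFO")
#     logger.info("printed once across processes", main_process_only=True)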
| 99 | 0 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
_snake_case = 'src/diffusers'
# Matches is_xxx_available()
_snake_case = re.compile(r'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
_snake_case = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
_snake_case = '\n{0} = None\n'
_snake_case = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n'
_snake_case = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
def _A ( snake_case ) -> Dict:
_lowercase : List[Any] = _re_backend.findall(snake_case )
if len(snake_case ) == 0:
return None
return "_and_".join(snake_case )
def _A ( ) -> Dict:
with open(os.path.join(snake_case , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
_lowercase : Any = f.readlines()
# Get to the point we do the actual imports for type checking
_lowercase : Union[str, Any] = 0
_lowercase : List[str] = {}
# Go through the end of the file
while line_index < len(snake_case ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
_lowercase : Optional[int] = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("else:" ):
line_index += 1
line_index += 1
_lowercase : List[str] = []
# Until we unindent, add backend objects to the list
while line_index < len(snake_case ) and len(lines[line_index] ) > 1:
_lowercase : str = lines[line_index]
_lowercase : Tuple = _re_single_line_import.search(snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(snake_case ) > 0:
_lowercase : int = objects
else:
line_index += 1
return backend_specific_objects
def _A ( snake_case , snake_case ) -> Optional[Any]:
if name.isupper():
return DUMMY_CONSTANT.format(snake_case )
elif name.islower():
return DUMMY_FUNCTION.format(snake_case , snake_case )
else:
return DUMMY_CLASS.format(snake_case , snake_case )
def _A ( snake_case=None ) -> str:
if backend_specific_objects is None:
_lowercase : str = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
_lowercase : Optional[Any] = {}
for backend, objects in backend_specific_objects.items():
_lowercase : str = "[" + ", ".join(F'''"{b}"''' for b in backend.split("_and_" ) ) + "]"
_lowercase : List[str] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(snake_case , snake_case ) for o in objects] )
_lowercase : Union[str, Any] = dummy_file
return dummy_files
def _A ( snake_case=False ) -> Union[str, Any]:
_lowercase : Any = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
_lowercase : Any = {"torch": "pt"}
# Locate actual dummy modules and read their content.
_lowercase : Tuple = os.path.join(snake_case , "utils" )
_lowercase : Union[str, Any] = {
backend: os.path.join(snake_case , F'''dummy_{short_names.get(snake_case , snake_case )}_objects.py''' )
for backend in dummy_files.keys()
}
_lowercase : Union[str, Any] = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(snake_case ):
with open(snake_case , "r" , encoding="utf-8" , newline="\n" ) as f:
_lowercase : int = f.read()
else:
_lowercase : Optional[Any] = ""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'''Updating diffusers.utils.dummy_{short_names.get(snake_case , snake_case )}_objects.py as the main '''
"__init__ has new objects." )
with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"The main __init__ has objects that are not present in "
F'''diffusers.utils.dummy_{short_names.get(snake_case , snake_case )}_objects.py. Run `make fix-copies` '''
"to fix this." )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
_snake_case = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 250 |
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class a__ ( unittest.TestCase ):
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : str = inspect.getfile(accelerate.test_utils )
_lowercase : List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
_lowercase : str = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Optional[Any] = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
_lowercase : Tuple = [sys.executable] + distributed_args
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
| 250 | 1 |
def sylvester( number: int ) -> int:
    assert isinstance(number , int ), f'''The input value of [n={number}] is not an integer'''
    if number == 1:
        return 2
    elif number < 1:
        msg = f'''The input value of [n={number}] has to be > 0'''
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F'The 8th number in Sylvester\'s sequence: {sylvester(8)}')
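    # Added check: the first Sylvester numbers are 2, 3, 7, 43, 1807.
    assert [sylvester(i) for i in range(1 , 6 )] == [2, 3, 7, 43, 1807]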
| 342 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _snake_case ( _lowercase ):
lowerCamelCase__: str = "detr"
lowerCamelCase__: Dict = ["past_key_values"]
lowerCamelCase__: str = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self: List[str] , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Any=None , __lowerCamelCase: Dict=3 , __lowerCamelCase: str=1_00 , __lowerCamelCase: Union[str, Any]=6 , __lowerCamelCase: Union[str, Any]=20_48 , __lowerCamelCase: Dict=8 , __lowerCamelCase: Optional[int]=6 , __lowerCamelCase: List[Any]=20_48 , __lowerCamelCase: int=8 , __lowerCamelCase: Tuple=0.0 , __lowerCamelCase: Dict=0.0 , __lowerCamelCase: Any=True , __lowerCamelCase: Tuple="relu" , __lowerCamelCase: Tuple=2_56 , __lowerCamelCase: Dict=0.1 , __lowerCamelCase: Union[str, Any]=0.0 , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: str=1.0 , __lowerCamelCase: List[str]=False , __lowerCamelCase: Dict="sine" , __lowerCamelCase: Optional[int]="resnet50" , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: int=False , __lowerCamelCase: Union[str, Any]=1 , __lowerCamelCase: Tuple=5 , __lowerCamelCase: int=2 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Union[str, Any]=5 , __lowerCamelCase: Dict=2 , __lowerCamelCase: int=0.1 , **__lowerCamelCase: str , ) -> int:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__UpperCAmelCase : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : List[Any] = backbone_config.get("model_type" )
__UpperCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type]
__UpperCAmelCase : List[str] = config_class.from_dict(__lowerCamelCase )
# set timm attributes to None
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = None, None, None
__UpperCAmelCase : Any = use_timm_backbone
__UpperCAmelCase : Optional[Any] = backbone_config
__UpperCAmelCase : Optional[Any] = num_channels
__UpperCAmelCase : List[Any] = num_queries
__UpperCAmelCase : Optional[int] = d_model
__UpperCAmelCase : Optional[Any] = encoder_ffn_dim
__UpperCAmelCase : Dict = encoder_layers
__UpperCAmelCase : List[Any] = encoder_attention_heads
__UpperCAmelCase : int = decoder_ffn_dim
__UpperCAmelCase : Tuple = decoder_layers
__UpperCAmelCase : int = decoder_attention_heads
__UpperCAmelCase : List[Any] = dropout
__UpperCAmelCase : Dict = attention_dropout
__UpperCAmelCase : Optional[Any] = activation_dropout
__UpperCAmelCase : int = activation_function
__UpperCAmelCase : Any = init_std
__UpperCAmelCase : str = init_xavier_std
__UpperCAmelCase : int = encoder_layerdrop
__UpperCAmelCase : Tuple = decoder_layerdrop
__UpperCAmelCase : List[Any] = encoder_layers
__UpperCAmelCase : Optional[Any] = auxiliary_loss
__UpperCAmelCase : int = position_embedding_type
__UpperCAmelCase : Optional[int] = backbone
__UpperCAmelCase : str = use_pretrained_backbone
__UpperCAmelCase : Dict = dilation
# Hungarian matcher
__UpperCAmelCase : Optional[int] = class_cost
__UpperCAmelCase : Optional[Any] = bbox_cost
__UpperCAmelCase : Optional[int] = giou_cost
# Loss coefficients
__UpperCAmelCase : Any = mask_loss_coefficient
__UpperCAmelCase : Any = dice_loss_coefficient
__UpperCAmelCase : Any = bbox_loss_coefficient
__UpperCAmelCase : Optional[int] = giou_loss_coefficient
__UpperCAmelCase : Optional[Any] = eos_coefficient
super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase )
@property
def _lowerCamelCase ( self: Dict ) -> int:
return self.encoder_attention_heads
@property
def _lowerCamelCase ( self: str ) -> int:
return self.d_model
@classmethod
def _lowerCamelCase ( cls: Optional[int] , __lowerCamelCase: PretrainedConfig , **__lowerCamelCase: List[Any] ) -> List[Any]:
return cls(backbone_config=__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> Dict[str, any]:
__UpperCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
__UpperCAmelCase : int = self.backbone_config.to_dict()
__UpperCAmelCase : List[str] = self.__class__.model_type
return output
class _snake_case ( _lowercase ):
lowerCamelCase__: Optional[int] = version.parse("1.11" )
@property
def _lowerCamelCase ( self: Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _lowerCamelCase ( self: Optional[Any] ) -> float:
return 1e-5
@property
def _lowerCamelCase ( self: List[str] ) -> int:
return 12
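# Added standalone sketch (hypothetical, not part of transformers): the
# attribute_map on the config above aliases "hidden_size" to "d_model";
# the mechanism reduces to a __getattr__ lookup like this.
class _AliasConfigSketch:
    attribute_map = {'hidden_size': 'd_model'}
    def __init__(self , d_model=256 ):
        self.d_model = d_model
    def __getattr__(self , name ):
        if name in self.attribute_map:
            return getattr(self , self.attribute_map[name] )
        raise AttributeError(name )
assert _AliasConfigSketch().hidden_size == 256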
| 342 | 1 |
'''simple docstring'''
def compute_ap( l ):  # noqa: E741
    """simple docstring"""
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs( root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
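# Added note: for the adjacency list above, the articulation points printed
# are 2, 3 and 5.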
| 267 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCAmelCase : str = logging.get_logger(__name__)
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = "linear"
lowerCAmelCase__ = "cosine"
lowerCAmelCase__ = "cosine_with_restarts"
lowerCAmelCase__ = "polynomial"
lowerCAmelCase__ = "constant"
lowerCAmelCase__ = "constant_with_warmup"
lowerCAmelCase__ = "piecewise_constant"
def a__ ( a__ , a__ = -1 ):
"""simple docstring"""
return LambdaLR(a__ , lambda a__ : 1 , last_epoch=a__ )
def a__ ( a__ , a__ , a__ = -1 ):
"""simple docstring"""
def lr_lambda(a__ ):
if current_step < num_warmup_steps:
return float(a__ ) / float(max(1.0 , a__ ) )
return 1.0
return LambdaLR(a__ , a__ , last_epoch=a__ )
def a__ ( a__ , a__ , a__ = -1 ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = rule_str.split(""":""" )
__SCREAMING_SNAKE_CASE = int(a__ )
__SCREAMING_SNAKE_CASE = float(a__ )
__SCREAMING_SNAKE_CASE = value
__SCREAMING_SNAKE_CASE = float(rule_list[-1] )
def create_rules_function(a__ , a__ ):
def rule_func(a__ ) -> float:
__SCREAMING_SNAKE_CASE = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(a__ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__SCREAMING_SNAKE_CASE = create_rules_function(a__ , a__ )
return LambdaLR(a__ , a__ , last_epoch=a__ )
def a__ ( a__ , a__ , a__ , a__=-1 ):
"""simple docstring"""
def lr_lambda(a__ ):
if current_step < num_warmup_steps:
return float(a__ ) / float(max(1 , a__ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(a__ , a__ , a__ )
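# Added standalone illustration (not in the original module) of the linear
# schedule above: the multiplier ramps 0 -> 1 across warmup, then decays
# linearly to 0 by the end of training.
def _linear_lr_multiplier(step , num_warmup_steps , num_training_steps ):
    if step < num_warmup_steps:
        return float(step ) / float(max(1 , num_warmup_steps ) )
    return max(
        0.0 , float(num_training_steps - step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
assert _linear_lr_multiplier(5 , 10 , 100 ) == 0.5
assert _linear_lr_multiplier(55 , 10 , 100 ) == 0.5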
def a__ ( a__ , a__ , a__ , a__ = 0.5 , a__ = -1 ):
"""simple docstring"""
def lr_lambda(a__ ):
if current_step < num_warmup_steps:
return float(a__ ) / float(max(1 , a__ ) )
__SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(a__ ) * 2.0 * progress )) )
return LambdaLR(a__ , a__ , a__ )
def a__ ( a__ , a__ , a__ , a__ = 1 , a__ = -1 ):
"""simple docstring"""
def lr_lambda(a__ ):
if current_step < num_warmup_steps:
return float(a__ ) / float(max(1 , a__ ) )
__SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(a__ ) * progress) % 1.0) )) )
return LambdaLR(a__ , a__ , a__ )
def a__ ( a__ , a__ , a__ , a__=1E-7 , a__=1.0 , a__=-1 ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
        raise ValueError(F'lr_end ({lr_end}) must be smaller than initial lr ({lr_init})' )
def lr_lambda(a__ ):
if current_step < num_warmup_steps:
return float(a__ ) / float(max(1 , a__ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__SCREAMING_SNAKE_CASE = lr_init - lr_end
__SCREAMING_SNAKE_CASE = num_training_steps - num_warmup_steps
__SCREAMING_SNAKE_CASE = 1 - (current_step - num_warmup_steps) / decay_steps
__SCREAMING_SNAKE_CASE = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(a__ , a__ , a__ )
UpperCAmelCase : Optional[Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def a__ ( a__ , a__ , a__ = None , a__ = None , a__ = None , a__ = 1 , a__ = 1.0 , a__ = -1 , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = SchedulerType(a__ )
__SCREAMING_SNAKE_CASE = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(a__ , last_epoch=a__ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(a__ , step_rules=a__ , last_epoch=a__ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F'{name} requires `num_warmup_steps`, please provide that argument.' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(a__ , num_warmup_steps=a__ , last_epoch=a__ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F'{name} requires `num_training_steps`, please provide that argument.' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
a__ , num_warmup_steps=a__ , num_training_steps=a__ , num_cycles=a__ , last_epoch=a__ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
a__ , num_warmup_steps=a__ , num_training_steps=a__ , power=a__ , last_epoch=a__ , )
return schedule_func(
a__ , num_warmup_steps=a__ , num_training_steps=a__ , last_epoch=a__ )
| 267 | 1 |
'''simple docstring'''
def bfs( graph , source , sink , parent ) -> bool:
    """simple docstring"""
    visited = [False] * len(graph )
    queue = []
    queue.append(source )
    visited[source] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[sink]
def ford_fulkerson( graph , source , sink ) -> int:
    """simple docstring"""
    parent = [-1] * (len(graph ))
    max_flow = 0
    while bfs(graph , source , sink , parent ):
        path_flow = float('''Inf''' )
        s = sink
        while s != source:
            # Find the minimum capacity along the selected augmenting path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source , sink = 0, 5
print(ford_fulkerson(graph, source, sink))
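# Added sanity example (not in the original script): a two-edge chain
# 0 -> 1 -> 2 with capacities 3 and 5 has maximum flow 3.
assert ford_fulkerson([[0, 3, 0], [0, 0, 5], [0, 0, 0]] , 0 , 2 ) == 3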
| 367 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search(
    possible_board: list[int] , diagonal_right_collisions: list[int] , diagonal_left_collisions: list[int] , boards: list[list[str]] , n: int , ) -> None:
    """simple docstring"""
    row = len(possible_board )
    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
        return
    # We iterate over each column in the row to find all possible results in each row
    for col in range(n ):
        # We apply what we learned previously. First we check that in the current board
        # (possible_board) there is no other equal value, because if there is it means
        # there is a collision in the vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not exist in their
        # respective variables. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any of these are True it means there is a collision, so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # If it is False we call the dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def n_queens_solution( n: int ) -> None:
    """simple docstring"""
    boards: list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n )
    # Print all the boards
    for board in boards:
        for column in board:
            print(column )
        print("""""" )
    print(len(boards ) , """solutions were found.""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
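    # Added note: the call above prints all boards for n = 4; the classic
    # solution counts are 2 for n = 4 and 92 for n = 8.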
| 13 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase : List[str] = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
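# Added note (hedged): with the _LazyModule pattern above, importing the
# package stays cheap; submodules are only materialized on first attribute
# access, e.g.
#     from transformers.models.blenderbot import BlenderbotConfig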
| 146 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
a = TypeVar('''T''')
class Node ( Generic[T] ):
    '''simple docstring'''
    def __init__( self , data : T ):
        self.data = data
        self.next : Node[T] | None = None
    def __str__( self ):
        return F'''{self.data}'''
class LinkedStack ( Generic[T] ):
    '''simple docstring'''
    def __init__( self ):
        self.top : Node[T] | None = None
    def __iter__( self ):
        node = self.top
        while node:
            yield node.data
            node = node.next
    def __str__( self ):
        return "->".join([str(item ) for item in self] )
    def __len__( self ):
        return len(tuple(iter(self ) ) )
    def is_empty( self ):
        return self.top is None
    def push( self , item : T ):
        node = Node(item )
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop( self ):
        if self.is_empty():
            raise IndexError('pop from empty stack' )
        assert isinstance(self.top , Node )
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data
    def peek( self ):
        if self.is_empty():
            raise IndexError('peek from empty stack' )
        assert self.top is not None
        return self.top.data
    def clear( self ):
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
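    # Added usage sketch (not in the original file):
    stack = LinkedStack[int]()
    for value in (1, 2, 3):
        stack.push(value )
    assert str(stack ) == '3->2->1'
    assert stack.pop() == 3 and stack.peek() == 2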
| 315 | 0 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
__lowerCamelCase = HfArgumentParser(InitializationArguments)
__lowerCamelCase = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
__lowerCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
__lowerCamelCase = {
"""vocab_size""": len(tokenizer),
"""scale_attn_by_inverse_layer_idx""": True,
"""reorder_and_upcast_attn""": True,
}
# Load model config (GPT-2 large in this case)
__lowerCamelCase = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
__lowerCamelCase = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 358 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__lowerCamelCase = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, """r""", encoding="""utf-8""") as f:
__lowerCamelCase = json.load(f)
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
return FSMTTokenizer.from_pretrained(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : str ) -> List[str]:
'''simple docstring'''
snake_case : List[Any] = FSMTForConditionalGeneration.from_pretrained(snake_case__ ).to(snake_case__ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Tuple , snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
snake_case : Optional[int] = f"""facebook/wmt19-{pair}"""
snake_case : Optional[Any] = self.get_tokenizer(snake_case__ )
snake_case : Dict = self.get_model(snake_case__ )
snake_case : List[Any] = bleu_data[pair]["src"]
snake_case : int = bleu_data[pair]["tgt"]
snake_case : Union[str, Any] = tokenizer(snake_case__ , return_tensors="pt" , truncation=snake_case__ , padding="longest" ).to(snake_case__ )
snake_case : str = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
snake_case : Optional[int] = tokenizer.batch_decode(
snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ )
snake_case : Optional[int] = calculate_bleu(snake_case__ , snake_case__ )
print(snake_case__ )
self.assertGreaterEqual(scores["bleu"] , snake_case__ )
| 10 | 0 |
"""simple docstring"""
from manim import *
class _a ( SCREAMING_SNAKE_CASE_):
"""simple docstring"""
def lowercase__ ( self : List[str] )->int:
_UpperCAmelCase = Rectangle(height=0.5 , width=0.5 )
_UpperCAmelCase = Rectangle(height=0.2_5 , width=0.2_5 )
_UpperCAmelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_UpperCAmelCase = [mem.copy() for i in range(6 )]
_UpperCAmelCase = [mem.copy() for i in range(6 )]
_UpperCAmelCase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_UpperCAmelCase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_UpperCAmelCase = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_UpperCAmelCase = Text('''CPU''' , font_size=2_4 )
_UpperCAmelCase = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCAmelCase )
_UpperCAmelCase = [mem.copy() for i in range(4 )]
_UpperCAmelCase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_UpperCAmelCase = Text('''GPU''' , font_size=2_4 )
_UpperCAmelCase = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCAmelCase )
_UpperCAmelCase = [mem.copy() for i in range(6 )]
_UpperCAmelCase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_UpperCAmelCase = Text('''Model''' , font_size=2_4 )
_UpperCAmelCase = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCAmelCase )
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = []
for i, rect in enumerate(__UpperCAmelCase ):
rect.set_stroke(__UpperCAmelCase )
_UpperCAmelCase = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=__UpperCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__UpperCAmelCase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__UpperCAmelCase , buff=0.0 )
self.add(__UpperCAmelCase )
model_cpu_arr.append(__UpperCAmelCase )
self.add(*__UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase )
_UpperCAmelCase = [mem.copy() for i in range(6 )]
_UpperCAmelCase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_UpperCAmelCase = Text('''Loaded Checkpoint''' , font_size=2_4 )
_UpperCAmelCase = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__UpperCAmelCase )
_UpperCAmelCase = []
_UpperCAmelCase = []
for i, rect in enumerate(__UpperCAmelCase ):
_UpperCAmelCase = fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 )
target.move_to(__UpperCAmelCase )
ckpt_arr.append(__UpperCAmelCase )
_UpperCAmelCase = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__UpperCAmelCase )
self.add(*__UpperCAmelCase , *__UpperCAmelCase )
_UpperCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_UpperCAmelCase = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCAmelCase , __UpperCAmelCase )
_UpperCAmelCase = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=1_8 , )
blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__UpperCAmelCase )
_UpperCAmelCase = MarkupText(
F'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
_UpperCAmelCase = [meta_mem.copy() for i in range(6 )]
_UpperCAmelCase = [meta_mem.copy() for i in range(6 )]
_UpperCAmelCase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_UpperCAmelCase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_UpperCAmelCase = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_UpperCAmelCase = Text('''Disk''' , font_size=2_4 )
_UpperCAmelCase = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
disk.move_to([-4.0, -1.2_5, 0] )
self.play(Write(__UpperCAmelCase , run_time=3 ) , Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) )
_UpperCAmelCase = []
for i, rect in enumerate(__UpperCAmelCase ):
_UpperCAmelCase = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) )
self.play(*__UpperCAmelCase )
self.play(FadeOut(__UpperCAmelCase ) )
_UpperCAmelCase = MarkupText(F'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase , run_time=3 ) )
self.play(
FadeOut(__UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) , )
self.wait()
| 260 |
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self) ->Tuple:
a_ = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
a_ = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
model.to(__UpperCAmelCase)
from datasets import load_dataset
a_ = load_dataset("nielsr/rvlcdip-demo")
a_ = dataset["train"][0]["image"].convert("RGB")
a_ = image_processor(__UpperCAmelCase , return_tensors="pt").to(__UpperCAmelCase)
# forward pass
with torch.no_grad():
a_ = model(**__UpperCAmelCase)
a_ = outputs.logits
a_ = torch.Size((1, 16))
self.assertEqual(logits.shape , __UpperCAmelCase)
a_ = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=__UpperCAmelCase , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4))
| 243 | 0 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
lowerCAmelCase__ : Dict =logging.get_logger(__name__)
lowerCAmelCase__ : int ='The Nymphenburg Palace is a beautiful palace in Munich!'
def a__ ( A__, A__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 1_0_2_4,
'hidden_size': 7_6_8,
'max_length': 5_1_2,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 1_0_2_4,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1E-5,
'token_type_vocab_size': 2,
}
SCREAMING_SNAKE_CASE_ : Dict = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
SCREAMING_SNAKE_CASE_ : Any = BERTEncoder(
attention_cell=predefined_args['attention_cell'], num_layers=predefined_args['num_layers'], units=predefined_args['units'], hidden_size=predefined_args['hidden_size'], max_length=predefined_args['max_length'], num_heads=predefined_args['num_heads'], scaled=predefined_args['scaled'], dropout=predefined_args['dropout'], output_attention=A__, output_all_encodings=A__, use_residual=predefined_args['use_residual'], activation=predefined_args.get('activation', 'gelu' ), layer_norm_eps=predefined_args.get('layer_norm_eps', A__ ), )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(get_home_dir(), 'models' )
SCREAMING_SNAKE_CASE_ : Tuple = _load_vocab(A__, A__, A__, cls=A__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = nlp.model.BERTModel(
A__, len(A__ ), units=predefined_args['units'], embed_size=predefined_args['embed_size'], embed_dropout=predefined_args['embed_dropout'], word_embed=predefined_args['word_embed'], use_pooler=A__, use_token_type_embed=A__, token_type_vocab_size=predefined_args['token_type_vocab_size'], use_classifier=A__, use_decoder=A__, )
original_bort.load_parameters(A__, cast_dtype=A__, ignore_extra=A__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = original_bort._collect_params_with_prefix()
# Build our config 🤗
SCREAMING_SNAKE_CASE_ : List[str] = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(A__ ),
}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BertConfig.from_dict(A__ )
SCREAMING_SNAKE_CASE_ : List[str] = BertForMaskedLM(A__ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(A__ ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(A__, A__ ):
SCREAMING_SNAKE_CASE_ : int = hf_param.shape
SCREAMING_SNAKE_CASE_ : Dict = to_torch(params[gluon_param] )
SCREAMING_SNAKE_CASE_ : List[str] = gluon_param.shape
assert (
shape_hf == shape_gluon
), F'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
SCREAMING_SNAKE_CASE_ : Dict = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight, 'word_embed.0.weight' )
SCREAMING_SNAKE_CASE_ : List[str] = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight, 'encoder.position_weight' )
SCREAMING_SNAKE_CASE_ : Any = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias, 'encoder.layer_norm.beta' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight, 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
SCREAMING_SNAKE_CASE_ : str = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
SCREAMING_SNAKE_CASE_ : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
SCREAMING_SNAKE_CASE_ : BertSelfAttention = layer.attention.self
SCREAMING_SNAKE_CASE_ : List[str] = check_and_map_params(
self_attn.key.bias.data, F'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
SCREAMING_SNAKE_CASE_ : List[Any] = check_and_map_params(
self_attn.key.weight.data, F'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = check_and_map_params(
self_attn.query.bias.data, F'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
SCREAMING_SNAKE_CASE_ : int = check_and_map_params(
self_attn.query.weight.data, F'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
SCREAMING_SNAKE_CASE_ : List[Any] = check_and_map_params(
self_attn.value.bias.data, F'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
SCREAMING_SNAKE_CASE_ : List[Any] = check_and_map_params(
self_attn.value.weight.data, F'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
# self attention output
SCREAMING_SNAKE_CASE_ : BertSelfOutput = layer.attention.output
SCREAMING_SNAKE_CASE_ : Tuple = check_and_map_params(
self_output.dense.bias, F'''encoder.transformer_cells.{i}.proj.bias''' )
SCREAMING_SNAKE_CASE_ : Dict = check_and_map_params(
self_output.dense.weight, F'''encoder.transformer_cells.{i}.proj.weight''' )
SCREAMING_SNAKE_CASE_ : int = check_and_map_params(
self_output.LayerNorm.bias, F'''encoder.transformer_cells.{i}.layer_norm.beta''' )
SCREAMING_SNAKE_CASE_ : str = check_and_map_params(
self_output.LayerNorm.weight, F'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
# intermediate
SCREAMING_SNAKE_CASE_ : BertIntermediate = layer.intermediate
SCREAMING_SNAKE_CASE_ : Optional[Any] = check_and_map_params(
intermediate.dense.bias, F'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
SCREAMING_SNAKE_CASE_ : List[str] = check_and_map_params(
intermediate.dense.weight, F'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
# output
SCREAMING_SNAKE_CASE_ : BertOutput = layer.output
SCREAMING_SNAKE_CASE_ : Union[str, Any] = check_and_map_params(
bert_output.dense.bias, F'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
SCREAMING_SNAKE_CASE_ : int = check_and_map_params(
bert_output.dense.weight, F'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = check_and_map_params(
bert_output.LayerNorm.bias, F'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = check_and_map_params(
bert_output.LayerNorm.weight, F'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
SCREAMING_SNAKE_CASE_ : Optional[int] = RobertaTokenizer.from_pretrained('roberta-base' )
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.encode_plus(A__ )['input_ids']
# Get gluon output
SCREAMING_SNAKE_CASE_ : Tuple = mx.nd.array([input_ids] )
SCREAMING_SNAKE_CASE_ : Dict = original_bort(inputs=A__, token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(A__ )
SCREAMING_SNAKE_CASE_ : Any = BertModel.from_pretrained(A__ )
hf_bort_model.eval()
SCREAMING_SNAKE_CASE_ : int = tokenizer.encode_plus(A__, return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hf_bort_model(**A__ )[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] = output_gluon[0].asnumpy()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = output_hf[0].detach().numpy()
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.max(np.abs(hf_layer - gluon_layer ) ).item()
SCREAMING_SNAKE_CASE_ : List[Any] = np.allclose(A__, A__, atol=1E-3 )
if success:
    print('✔️ Both models output the same tensors' )
else:
    print('❌ Both models do **NOT** output the same tensors' )
print('Absolute difference is:', A__ )
if __name__ == "__main__":
lowerCAmelCase__ : Tuple =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase__ : str =parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
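# A minimal, self-contained sketch (not part of the conversion script) of the
# shape-checked weight-transfer pattern used by check_and_map_params above,
# followed by the tolerance comparison the script ends with. The names
# `transfer`, `dst`, `src` and the 2x3 shapes are illustrative only.
import numpy as np
import torch

def transfer(dst: torch.Tensor, src: np.ndarray) -> torch.Tensor:
    # refuse to copy a source array whose shape does not match the target
    assert tuple(dst.shape) == src.shape, f"shape mismatch: {tuple(dst.shape)} vs {src.shape}"
    return torch.from_numpy(src)

dst = torch.zeros(2, 3)
src = np.arange(6, dtype=np.float32).reshape(2, 3)
dst = transfer(dst, src)
# after conversion, outputs are compared within an absolute tolerance:
print(np.allclose(dst.numpy(), src, atol=1e-3))  # True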
| 365 |
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port))
    sock.send(b'Hello server!')
    with open('Received_file', 'wb') as out_file:
        print('File opened')
        print('Receiving data...')
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)
    print('Successfully received the file')
    sock.close()
    print('Connection closed')
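# For context, a minimal matching server sketch (hypothetical, not part of this
# snippet): it accepts a single connection on the same port, reads the greeting,
# and streams a file back to the client above in 1024-byte chunks.
def serve_file(filename: str, port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    print(conn.recv(1024))  # b'Hello server!'
    with open(filename, 'rb') as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()
    server.close()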
if __name__ == "__main__":
main()
| 162 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class a ( _lowerCamelCase ):
def A_ ( self : Optional[int] ):
snake_case_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowercase_ , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(lowercase_ , '''num_attention_heads''' ) )
self.parent.assertTrue(hasattr(lowercase_ , '''num_encoder_blocks''' ) )
class a :
def __init__( self : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Any=13 , lowercase_ : Optional[Any]=64 , lowercase_ : Any=3 , lowercase_ : Optional[Any]=4 , lowercase_ : Dict=[2, 2, 2, 2] , lowercase_ : int=[8, 4, 2, 1] , lowercase_ : str=[16, 32, 64, 128] , lowercase_ : Optional[Any]=[1, 4, 8, 16] , lowercase_ : Any=[1, 2, 4, 8] , lowercase_ : Optional[int]=True , lowercase_ : str=True , lowercase_ : List[str]="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Any=0.02 , lowercase_ : Union[str, Any]=3 , lowercase_ : Union[str, Any]=None , ):
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = num_channels
snake_case_ = num_encoder_blocks
snake_case_ = sr_ratios
snake_case_ = depths
snake_case_ = hidden_sizes
snake_case_ = downsampling_rates
snake_case_ = num_attention_heads
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = scope
def A_ ( self : str ):
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case_ = self.get_config()
return config, pixel_values, labels
def A_ ( self : int ):
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def A_ ( self : Any , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : Any ):
snake_case_ = SegformerModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ = model(lowercase_ )
snake_case_ = snake_case_ = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def A_ ( self : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Any ):
snake_case_ = self.num_labels
snake_case_ = SegformerForSemanticSegmentation(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ = model(lowercase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
snake_case_ = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def A_ ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : str ):
snake_case_ = 1
snake_case_ = SegformerForSemanticSegmentation(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(lowercase_ )
snake_case_ = model(lowercase_ , labels=lowercase_ )
self.parent.assertGreater(result.loss , 0.0 )
def A_ ( self : List[str] ):
snake_case_ = self.prepare_config_and_inputs()
snake_case_ ,snake_case_ ,snake_case_ = config_and_inputs
snake_case_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
snake_case_ = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = False
def A_ ( self : Any ):
snake_case_ = SegformerModelTester(self )
snake_case_ = SegformerConfigTester(self , config_class=lowercase_ )
def A_ ( self : Tuple ):
self.config_tester.run_common_tests()
def A_ ( self : int ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def A_ ( self : int ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*lowercase_ )
def A_ ( self : Any ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*lowercase_ )
@unittest.skip('''SegFormer does not use inputs_embeds''' )
def A_ ( self : str ):
pass
@unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''' )
def A_ ( self : List[Any] ):
pass
def A_ ( self : Optional[Any] ):
snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(lowercase_ )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_ )
def A_ ( self : str ):
snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = True
for model_class in self.all_model_classes:
snake_case_ = True
snake_case_ = False
snake_case_ = True
snake_case_ = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
snake_case_ = outputs.attentions
snake_case_ = sum(self.model_tester.depths )
self.assertEqual(len(lowercase_ ) , lowercase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case_ = True
snake_case_ = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
snake_case_ = outputs.attentions
self.assertEqual(len(lowercase_ ) , lowercase_ )
# verify the first attentions (first block, first layer)
snake_case_ = (self.model_tester.image_size // 4) ** 2
snake_case_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
snake_case_ = (self.model_tester.image_size // 32) ** 2
snake_case_ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
snake_case_ = len(lowercase_ )
# Check attention is always last and order is fine
snake_case_ = True
snake_case_ = True
snake_case_ = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(out_len + 1 , len(lowercase_ ) )
snake_case_ = outputs.attentions
self.assertEqual(len(lowercase_ ) , lowercase_ )
# verify the first attentions (first block, first layer)
snake_case_ = (self.model_tester.image_size // 4) ** 2
snake_case_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def A_ ( self : str ):
def check_hidden_states_output(lowercase_ : str , lowercase_ : List[str] , lowercase_ : List[str] ):
snake_case_ = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
snake_case_ = outputs.hidden_states
snake_case_ = self.model_tester.num_encoder_blocks
self.assertEqual(len(lowercase_ ) , lowercase_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def A_ ( self : Union[str, Any] ):
if not self.model_tester.is_training:
return
snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = True
for model_class in self.all_model_classes:
if model_class in get_values(lowercase_ ):
continue
snake_case_ = model_class(lowercase_ )
model.to(lowercase_ )
model.train()
snake_case_ = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
snake_case_ = model(**lowercase_ ).loss
loss.backward()
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def A_ ( self : List[Any] ):
pass
@slow
def A_ ( self : Dict ):
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = SegformerModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def __magic_name__ ( ) -> Optional[int]:
'''simple docstring'''
snake_case_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class a ( unittest.TestCase ):
@slow
def A_ ( self : List[Any] ):
# only resize + normalize
snake_case_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=lowercase_ , align=lowercase_ , do_random_crop=lowercase_ )
snake_case_ = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
lowercase_ )
snake_case_ = prepare_img()
snake_case_ = image_processor(images=lowercase_ , return_tensors='''pt''' )
snake_case_ = encoded_inputs.pixel_values.to(lowercase_ )
with torch.no_grad():
snake_case_ = model(lowercase_ )
snake_case_ = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , lowercase_ )
snake_case_ = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowercase_ , atol=1e-4 ) )
@slow
def A_ ( self : List[str] ):
# only resize + normalize
snake_case_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=lowercase_ , align=lowercase_ , do_random_crop=lowercase_ )
snake_case_ = SegformerForSemanticSegmentation.from_pretrained(
'''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''' ).to(lowercase_ )
snake_case_ = prepare_img()
snake_case_ = image_processor(images=lowercase_ , return_tensors='''pt''' )
snake_case_ = encoded_inputs.pixel_values.to(lowercase_ )
with torch.no_grad():
snake_case_ = model(lowercase_ )
snake_case_ = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , lowercase_ )
snake_case_ = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowercase_ , atol=1e-1 ) )
@slow
def A_ ( self : str ):
# only resize + normalize
snake_case_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=lowercase_ , align=lowercase_ , do_random_crop=lowercase_ )
snake_case_ = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
lowercase_ )
snake_case_ = prepare_img()
snake_case_ = image_processor(images=lowercase_ , return_tensors='''pt''' )
snake_case_ = encoded_inputs.pixel_values.to(lowercase_ )
with torch.no_grad():
snake_case_ = model(lowercase_ )
snake_case_ = outputs.logits.detach().cpu()
snake_case_ = image_processor.post_process_semantic_segmentation(outputs=lowercase_ , target_sizes=[(500, 300)] )
snake_case_ = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , lowercase_ )
snake_case_ = image_processor.post_process_semantic_segmentation(outputs=lowercase_ )
snake_case_ = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , lowercase_ )
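# A standalone sketch of the shape arithmetic the attention assertions above rely
# on: SegFormer tokenizes at 1/4 resolution before the first block, and each
# block's efficient attention reduces keys/values by its spatial-reduction ratio.
# The numbers mirror the tester defaults (image_size=64, sr_ratios=[8, 4, 2, 1]).
image_size = 64
sr_ratios = [8, 4, 2, 1]
seq_len_first = (image_size // 4) ** 2                    # queries, first block
reduced_first = (image_size // (4 * sr_ratios[0])) ** 2   # keys/values, first block
seq_len_last = (image_size // 32) ** 2                    # queries, last block
reduced_last = (image_size // (32 * sr_ratios[-1])) ** 2  # keys/values, last block
print(seq_len_first, reduced_first, seq_len_last, reduced_last)  # 256 4 4 4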
| 56 |
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # a sum of zero can always be formed by taking no elements, hence True
    for i in range(arr_len + 1):
        subset[i][0] = True
    # a non-zero sum cannot be formed from the empty set, hence False
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
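    # Quick sanity check (illustrative values): 4 + 5 == 9 is reachable, while no
    # subset sums to 30 (without 34 the elements total only 26).
    print(is_sum_subset([3, 34, 4, 12, 5, 2], 9))   # True
    print(is_sum_subset([3, 34, 4, 12, 5, 2], 30))  # False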
| 326 | 0 |
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[int]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 cells surrounding cell (i, j)
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make this cell visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
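# Illustrative usage: with 8-connectivity, this grid (values chosen for the
# example, not taken from elsewhere in this file) contains five islands.
if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    print(Graph(5, 5, grid).count_islands())  # 5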
| 335 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=4 , ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = seq_length
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_attention_mask
UpperCAmelCase__ = use_token_type_ids
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = num_choices
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_attention_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
if self.use_token_type_ids:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = True
UpperCAmelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = FlaxRobertaModelTester(self )
@slow
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
for model_class_name in self.all_model_classes:
UpperCAmelCase__ = model_class_name.from_pretrained('roberta-base' , from_pt=__a )
UpperCAmelCase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(__a )
| 335 | 1 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
_a = TypeVar('KEY')
_a = TypeVar('VAL')
@dataclass(frozen=lowercase__ ,slots=lowercase__ )
class A_ (Generic[KEY, VAL] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : KEY
SCREAMING_SNAKE_CASE__ : VAL
class A_ (_Item ):
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
super().__init__(lowercase_ , lowercase_ )
def __bool__( self ):
"""simple docstring"""
return False
_a = _DeletedItem()
class A_ (MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self , lowercase_ = 8 , lowercase_ = 0.75 ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = initial_block_size
UpperCAmelCase_ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
UpperCAmelCase_ : Optional[Any] = capacity_factor
UpperCAmelCase_ : int = 0
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return hash(lowercase_ ) % len(self._buckets )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return (ind + 1) % len(self._buckets )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = self._buckets[ind]
if not stored:
UpperCAmelCase_ : List[Any] = _Item(lowercase_ , lowercase_ )
self._len += 1
return True
elif stored.key == key:
UpperCAmelCase_ : Tuple = _Item(lowercase_ , lowercase_ )
return True
else:
return False
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
if len(self._buckets ) <= self._initial_block_size:
return False
UpperCAmelCase_ : List[str] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self._buckets
UpperCAmelCase_ : Any = [None] * new_size
UpperCAmelCase_ : Any = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._resize(len(self._buckets ) * 2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._resize(len(self._buckets ) // 2 )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = self._get_bucket_index(lowercase_ )
for _ in range(len(self._buckets ) ):
yield ind
UpperCAmelCase_ : Tuple = self._get_next_ind(lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
for ind in self._iterate_buckets(lowercase_ ):
if self._try_set(lowercase_ , lowercase_ , lowercase_ ):
break
def __setitem__( self , lowercase_ , lowercase_ ):
"""simple docstring"""
if self._is_full():
self._size_up()
self._add_item(lowercase_ , lowercase_ )
def __delitem__( self , lowercase_ ):
"""simple docstring"""
for ind in self._iterate_buckets(lowercase_ ):
UpperCAmelCase_ : Optional[Any] = self._buckets[ind]
if item is None:
raise KeyError(lowercase_ )
if item is _deleted:
continue
if item.key == key:
UpperCAmelCase_ : Optional[Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , lowercase_ ):
"""simple docstring"""
for ind in self._iterate_buckets(lowercase_ ):
UpperCAmelCase_ : Dict = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowercase_ )
def __len__( self ):
"""simple docstring"""
return self._len
def __iter__( self ):
"""simple docstring"""
yield from (item.key for item in self._buckets if item)
def __repr__( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = " ,".join(
F"""{item.key}: {item.val}""" for item in self._buckets if item )
return F"""HashMap({val_string})"""
| 61 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec('s3fs') is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS: List[fsspec.AbstractFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("""://""" )[1]
    return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src) , fs._strip_protocol(dst) )
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn , """reset_lock""" ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
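# Illustrative behaviour of the URI helper above (pure string handling, no
# network or filesystem access involved):
print(extract_path_from_uri("s3://my-bucket/data"))  # my-bucket/data
print(extract_path_from_uri("relative/path"))        # relative/path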
| 321 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
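# A hypothetical concrete command built on the ABC above (the command name and
# behaviour are illustrative): register_subcommand wires arguments into an
# argparse parser, while run() performs the actual work.
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        parser.add_argument("--name", default="world")

    def run(self):
        print("hello")

HelloCommand.register_subcommand(ArgumentParser())
HelloCommand().run()  # prints "hello"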
| 195 |
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
a__ : Optional[Any] = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = list(s_dict.keys() )
for key in keys:
__SCREAMING_SNAKE_CASE = R".*/layers_(\d+)"
__SCREAMING_SNAKE_CASE = key
if re.match(lowerCAmelCase_ , lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = re.sub(R"layers_(\d+)" , R"block/\1/layer" , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = R"(encoder|decoder)\/"
if re.match(lowerCAmelCase_ , lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = re.match(lowerCAmelCase_ , lowerCAmelCase_ ).groups()
if groups[0] == "encoder":
__SCREAMING_SNAKE_CASE = re.sub(R"/mlp/" , R"/1/mlp/" , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , lowerCAmelCase_ )
elif groups[0] == "decoder":
__SCREAMING_SNAKE_CASE = re.sub(R"/mlp/" , R"/2/mlp/" , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , lowerCAmelCase_ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
__SCREAMING_SNAKE_CASE = new_key.replace(lowerCAmelCase_ , lowerCAmelCase_ )
print(f"""{key} -> {new_key}""" )
__SCREAMING_SNAKE_CASE = s_dict.pop(lowerCAmelCase_ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__SCREAMING_SNAKE_CASE = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__SCREAMING_SNAKE_CASE = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weihts = s_dict[key]
            for idx in range(num_experts):
                new_key = key.replace('expert/' , F'''experts.expert_{idx}.''' )
                s_dict[new_key] = expert_weihts[idx]
                print(F"""{key} -> {new_key}""" )
            s_dict.pop(key )
return s_dict
a__ : List[Any] = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
import regex as re
with open(lowerCAmelCase_ , "r" ) as f:
__SCREAMING_SNAKE_CASE = f.read()
__SCREAMING_SNAKE_CASE = re.findall(R"(.*) = ([0-9.]*)" , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
__SCREAMING_SNAKE_CASE = float(lowerCAmelCase_ ) if "." in value else int(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = re.findall(R"(.*activations) = \(\'(.*)\',\)" , lowerCAmelCase_ )[0]
__SCREAMING_SNAKE_CASE = str(activation[1] )
__SCREAMING_SNAKE_CASE = num_experts
__SCREAMING_SNAKE_CASE = SwitchTransformersConfig(**lowerCAmelCase_ )
return config
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_="./" , lowerCAmelCase_=8 ):
'''simple docstring'''
print(f"""Loading flax weights from : {flax_checkpoint_path}""" )
    __SCREAMING_SNAKE_CASE = checkpoints.load_t5x_checkpoint(lowerCAmelCase_ )
if gin_file is not None:
__SCREAMING_SNAKE_CASE = convert_gin_to_config(lowerCAmelCase_ , lowerCAmelCase_ )
else:
__SCREAMING_SNAKE_CASE = SwitchTransformersConfig.from_pretrained(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = SwitchTransformersForConditionalGeneration(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = flax_params["target"]
__SCREAMING_SNAKE_CASE = flatten_dict(lowerCAmelCase_ , sep="/" )
__SCREAMING_SNAKE_CASE = rename_keys(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = unflatten_dict(lowerCAmelCase_ , sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowerCAmelCase_ , lowerCAmelCase_ )
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
a__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
a__ : Tuple = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
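# A small, self-contained illustration of the layer-renaming regex used above:
# flat T5X names like "encoder/layers_3/..." become "encoder/block/3/layer/...".
import re

key = "encoder/layers_3/attention/query/kernel"
print(re.sub(r"layers_(\d+)", r"block/\1/layer", key))
# encoder/block/3/layer/attention/query/kernel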
| 195 | 1 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase: int = logging.get_logger(__name__)
def a( A : Any ) -> List[str]:
"""simple docstring"""
a = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
a = 128
elif "12-12" in model_name:
a = 12
a = 12
elif "14-14" in model_name:
a = 14
a = 14
elif "16-16" in model_name:
a = 16
a = 16
else:
raise ValueError("Model not supported" )
a = "huggingface/label-files"
if "speech-commands" in model_name:
a = 35
a = "speech-commands-v2-id2label.json"
else:
a = 527
a = "audioset-id2label.json"
a = json.load(open(hf_hub_download(a__ , a__ , repo_type="dataset" ) , "r" ) )
a = {int(a__ ): v for k, v in idalabel.items()}
a = idalabel
a = {v: k for k, v in idalabel.items()}
return config
def a( A : int ) -> Optional[Any]:
"""simple docstring"""
if "module.v" in name:
a = name.replace("module.v" , "audio_spectrogram_transformer" )
if "cls_token" in name:
a = name.replace("cls_token" , "embeddings.cls_token" )
if "dist_token" in name:
a = name.replace("dist_token" , "embeddings.distillation_token" )
if "pos_embed" in name:
a = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
a = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
# transformer blocks
if "blocks" in name:
a = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
a = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
a = name.replace("attn" , "attention.self" )
if "norm1" in name:
a = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
a = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
a = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
a = name.replace("mlp.fc2" , "output.dense" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
a = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" )
# classifier head
if "module.mlp_head.0" in name:
a = name.replace("module.mlp_head.0" , "classifier.layernorm" )
if "module.mlp_head.1" in name:
a = name.replace("module.mlp_head.1" , "classifier.dense" )
return name
def a( A : str , A : Optional[Any] ) -> str:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
a = orig_state_dict.pop(a__ )
if "qkv" in key:
a = key.split("." )
a = int(key_split[3] )
a = config.hidden_size
if "weight" in key:
a = val[:dim, :]
a = val[dim : dim * 2, :]
a = val[-dim:, :]
else:
a = val[:dim]
a = val[dim : dim * 2]
a = val[-dim:]
else:
a = val
return orig_state_dict
def a( A : List[Any] ) -> List[Any]:
"""simple docstring"""
a = [
"module.v.head.weight",
"module.v.head.bias",
"module.v.head_dist.weight",
"module.v.head_dist.bias",
]
for k in ignore_keys:
state_dict.pop(a__ , a__ )
@torch.no_grad()
def a( A : Optional[Any] , A : Optional[int] , A : int=False ) -> int:
"""simple docstring"""
a = get_audio_spectrogram_transformer_config(a__ )
a = {
"ast-finetuned-audioset-10-10-0.4593": (
"https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.450": (
"https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448": (
"https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448-v2": (
"https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
),
"ast-finetuned-audioset-12-12-0.447": (
"https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
),
"ast-finetuned-audioset-14-14-0.443": (
"https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
),
"ast-finetuned-audioset-16-16-0.442": (
"https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
),
"ast-finetuned-speech-commands-v2": (
"https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
),
}
# load original state_dict
a = model_name_to_url[model_name]
a = torch.hub.load_state_dict_from_url(a__ , map_location="cpu" )
# remove some keys
remove_keys(a__ )
# rename some keys
a = convert_state_dict(a__ , a__ )
# load 🤗 model
a = ASTForAudioClassification(a__ )
model.eval()
model.load_state_dict(a__ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
a = -4.2_677_393 if "speech-commands" not in model_name else -6.845_978
a = 4.5_689_974 if "speech-commands" not in model_name else 5.5_654_526
a = 1024 if "speech-commands" not in model_name else 128
a = ASTFeatureExtractor(mean=a__ , std=a__ , max_length=a__ )
if "speech-commands" in model_name:
a = load_dataset("speech_commands" , "v0.02" , split="validation" )
a = dataset[0]["audio"]["array"]
else:
a = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , )
a , a = torchaudio.load(a__ )
a = waveform.squeeze().numpy()
a = feature_extractor(a__ , sampling_rate=1_6000 , return_tensors="pt" )
# forward pass
a = model(**a__ )
a = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
a = torch.tensor([-0.8_760, -7.0_042, -8.6_602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
a = torch.tensor([-1.1_986, -7.0_903, -8.2_718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
a = torch.tensor([-2.6_128, -8.0_080, -9.4_344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
a = torch.tensor([-1.5_080, -7.4_534, -8.8_917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
a = torch.tensor([-0.5_050, -6.5_833, -8.0_843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
a = torch.tensor([-0.3_826, -7.0_336, -8.2_413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
a = torch.tensor([-1.2_113, -6.9_101, -8.3_470] )
elif model_name == "ast-finetuned-speech-commands-v2":
a = torch.tensor([6.1_589, -8.0_566, -8.7_984] )
else:
raise ValueError("Unknown model name" )
if not torch.allclose(logits[0, :3] , a__ , atol=1e-4 ):
raise ValueError("Logits don\'t match" )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(a__ ).mkdir(exist_ok=a__ )
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(a__ )
print(f'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(a__ )
if push_to_hub:
print("Pushing model and feature extractor to the hub..." )
model.push_to_hub(f'''MIT/{model_name}''' )
feature_extractor.push_to_hub(f'''MIT/{model_name}''' )
if __name__ == "__main__":
_lowercase: Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="ast-finetuned-audioset-10-10-0.4593",
type=str,
help="Name of the Audio Spectrogram Transformer model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_lowercase: Optional[Any] = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
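# A minimal sketch of the fused-QKV split performed in convert_state_dict above:
# a (3*dim, dim) "qkv" weight is sliced into equal query/key/value blocks.
# dim=4 here is illustrative; the real script uses config.hidden_size.
import torch

dim = 4
qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
print(q.shape, k.shape, v.shape)  # torch.Size([4, 4]) three times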
| 227 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class __a ( unittest.TestCase ):
_a : List[str] = JukeboxTokenizer
_a : List[Any] = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
import torch
_UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' )
_UpperCAmelCase = tokenizer(**self.metas )['input_ids']
# fmt: off
_UpperCAmelCase = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
import torch
_UpperCAmelCase = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' )
_UpperCAmelCase = tokenizer(**self.metas )['input_ids']
# fmt: off
_UpperCAmelCase = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 329 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
A__ : List[Any] =logging.get_logger(__name__) # pylint: disable=invalid-name
A__ : Union[str, Any] ='''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class UpperCAmelCase ( _a ):
_lowercase: List[Any] = 42
class UpperCAmelCase ( _a ):
def __init__( self : Optional[Any] , __snake_case : PriorTransformer , __snake_case : CLIPVisionModel , __snake_case : CLIPImageProcessor , __snake_case : HeunDiscreteScheduler , __snake_case : ShapERenderer , ) -> Optional[int]:
super().__init__()
self.register_modules(
prior=_a , image_encoder=_a , image_processor=_a , scheduler=_a , renderer=_a , )
def lowercase__ ( self : Tuple , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Any , __snake_case : Any , __snake_case : List[str] , __snake_case : Any ) -> List[str]:
if latents is None:
_lowerCAmelCase = randn_tensor(_a , generator=_a , device=_a , dtype=_a )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
_lowerCAmelCase = latents.to(_a )
_lowerCAmelCase = latents * scheduler.init_noise_sigma
return latents
def lowercase__ ( self : Dict , __snake_case : str=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_lowerCAmelCase = torch.device(f"cuda:{gpu_id}" )
_lowerCAmelCase = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
@property
def lowercase__ ( self : Optional[Any] ) -> Any:
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_a , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowercase__ ( self : Any , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : List[str] , ) -> Any:
if isinstance(_a , _a ) and isinstance(image[0] , torch.Tensor ):
_lowerCAmelCase = torch.cat(_a , axis=0 ) if image[0].ndim == 4 else torch.stack(_a , axis=0 )
if not isinstance(_a , torch.Tensor ):
_lowerCAmelCase = self.image_processor(_a , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 )
_lowerCAmelCase = image.to(dtype=self.image_encoder.dtype , device=_a )
_lowerCAmelCase = self.image_encoder(_a )["last_hidden_state"]
_lowerCAmelCase = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_lowerCAmelCase = image_embeds.repeat_interleave(_a , dim=0 )
if do_classifier_free_guidance:
_lowerCAmelCase = torch.zeros_like(_a )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_a )
def __call__( self : List[Any] , __snake_case : Union[PIL.Image.Image, List[PIL.Image.Image]] , __snake_case : int = 1 , __snake_case : int = 25 , __snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __snake_case : Optional[torch.FloatTensor] = None , __snake_case : float = 4.0 , __snake_case : int = 64 , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , ) -> str:
if isinstance(_a , PIL.Image.Image ):
_lowerCAmelCase = 1
elif isinstance(_a , torch.Tensor ):
_lowerCAmelCase = image.shape[0]
elif isinstance(_a , _a ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
_lowerCAmelCase = len(_a )
else:
raise ValueError(
f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_a )}" )
_lowerCAmelCase = self._execution_device
_lowerCAmelCase = batch_size * num_images_per_prompt
_lowerCAmelCase = guidance_scale > 1.0
_lowerCAmelCase = self._encode_image(_a , _a , _a , _a )
# prior
self.scheduler.set_timesteps(_a , device=_a )
_lowerCAmelCase = self.scheduler.timesteps
_lowerCAmelCase = self.prior.config.num_embeddings
_lowerCAmelCase = self.prior.config.embedding_dim
_lowerCAmelCase = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _a , _a , _a , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_lowerCAmelCase = latents.reshape(latents.shape[0] , _a , _a )
for i, t in enumerate(self.progress_bar(_a ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase = self.scheduler.scale_model_input(_a , _a )
_lowerCAmelCase = self.prior(
_a , timestep=_a , proj_embedding=_a , ).predicted_image_embedding
# remove the variance
_lowerCAmelCase = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
_lowerCAmelCase = noise_pred.chunk(2 )
_lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_lowerCAmelCase = self.scheduler.step(
_a , timestep=_a , sample=_a , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_a )
_lowerCAmelCase = []
for i, latent in enumerate(_a ):
_lowerCAmelCase = self.renderer.decode(
latent[None, :] , _a , size=_a , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(_a )
_lowerCAmelCase = torch.stack(_a )
if output_type not in ["np", "pil"]:
raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}" )
_lowerCAmelCase = images.cpu().numpy()
if output_type == "pil":
_lowerCAmelCase = [self.numpy_to_pil(_a ) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_a )
| 370 |
'''simple docstring'''
def UpperCamelCase__ ( texta , textb ):
    """simple docstring"""
    if not (isinstance(texta , str ) and isinstance(textb , str )):
        raise ValueError("""longest_common_substring() takes two strings for inputs""" )
    texta_length = len(texta )
    textb_length = len(textb )
    dp = [[0] * (textb_length + 1) for _ in range(texta_length + 1 )]
    ans_index = 0
    ans_length = 0
    for i in range(1 , texta_length + 1 ):
        for j in range(1 , textb_length + 1 ):
            if texta[i - 1] == textb[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return texta[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
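    # Hedged usage example (inputs invented for illustration): the longest run
    # of characters shared by "abcdef" and "xabcy" is "abc".
    assert UpperCamelCase__("abcdef" , "xabcy" ) == "abc"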
| 220 | 0 |
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
__lowerCAmelCase : Dict = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class DiffieHellman :
    """simple docstring"""
    def __init__( self : str , group : int = 14 ) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group" )
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(32 ) ) , base=16 )
    def get_private_key( self : Optional[int] ) -> str:
        return hex(self.__private_key )[2:]
    def generate_public_key( self : List[Any] ) -> str:
        public_key = pow(self.generator , self.__private_key , self.prime )
        return hex(public_key )[2:]
    def is_valid_public_key( self : Dict , key : int ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
        )
    def generate_shared_key( self : Optional[int] , other_key_str : str ) -> str:
        other_key = int(other_key_str , base=16 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError("Invalid public key" )
        shared_key = pow(other_key , self.__private_key , self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
    @staticmethod
    def is_valid_public_key_static( remote_public_key : int , prime : int ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key , (prime - 1) // 2 , prime ) == 1
        )
    @staticmethod
    def generate_shared_key_static( local_private_key_str : str , remote_public_key_str : str , group : int = 14 ) -> str:
        local_private_key = int(local_private_key_str , base=16 )
        remote_public_key = int(remote_public_key_str , base=16 )
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key , prime ):
            raise ValueError("Invalid public key" )
        shared_key = pow(remote_public_key , local_private_key , prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
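    # Hedged round-trip example (variable names invented): both parties must
    # derive the same shared secret from each other's public keys.
    alice = DiffieHellman(group=14 )
    bob = DiffieHellman(group=14 )
    assert alice.generate_shared_key(bob.generate_public_key() ) == bob.generate_shared_key(
        alice.generate_public_key() )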
| 107 |
def lowerCAmelCase__(mass ,velocity ) -> float:
    '''simple docstring'''
    if mass < 0:
        raise ValueError('''The mass of a body cannot be negative''' )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
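    # Hedged worked example (numbers invented): 0.5 * 10 * 5 ** 2 == 125.0
    assert lowerCAmelCase__(10 , 5 ) == 125.0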
| 209 | 0 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase ):
__UpperCAmelCase : Optional[int] = old_name
if "patch_embed" in old_name:
__UpperCAmelCase : int = old_name.split("." )
if layer == "0":
__UpperCAmelCase : Dict = old_name.replace("0", "convolution1" )
elif layer == "1":
__UpperCAmelCase : List[str] = old_name.replace("1", "batchnorm_before" )
elif layer == "3":
__UpperCAmelCase : List[str] = old_name.replace("3", "convolution2" )
else:
__UpperCAmelCase : Union[str, Any] = old_name.replace("4", "batchnorm_after" )
if "network" in old_name and re.search(R"\d\.\d", lowerCAmelCase_ ):
__UpperCAmelCase : Dict = r'\b\d{2}\b'
if bool(re.search(lowerCAmelCase_, lowerCAmelCase_ ) ):
__UpperCAmelCase : Dict = re.search(R"\d\.\d\d.", lowerCAmelCase_ ).group()
else:
__UpperCAmelCase : Optional[int] = re.search(R"\d\.\d.", lowerCAmelCase_ ).group()
if int(match[0] ) < 6:
__UpperCAmelCase : int = old_name.replace(lowerCAmelCase_, "" )
__UpperCAmelCase : Union[str, Any] = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1] )
__UpperCAmelCase : int = 'intermediate_stages.' + trimmed_name
else:
__UpperCAmelCase : Tuple = old_name.replace(lowerCAmelCase_, "" )
if int(match[2] ) < num_meta4D_last_stage:
__UpperCAmelCase : str = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2] )
else:
__UpperCAmelCase : int = str(int(match[2] ) - num_meta4D_last_stage )
__UpperCAmelCase : List[str] = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
__UpperCAmelCase : Union[str, Any] = trimmed_name.replace("norm1", "layernorm1" )
elif "norm2" in old_name:
__UpperCAmelCase : List[Any] = trimmed_name.replace("norm2", "layernorm2" )
elif "fc1" in old_name:
__UpperCAmelCase : Dict = trimmed_name.replace("fc1", "linear_in" )
elif "fc2" in old_name:
__UpperCAmelCase : List[Any] = trimmed_name.replace("fc2", "linear_out" )
__UpperCAmelCase : Dict = 'last_stage.' + trimmed_name
elif "network" in old_name and re.search(R".\d.", lowerCAmelCase_ ):
__UpperCAmelCase : List[str] = old_name.replace("network", "intermediate_stages" )
if "fc" in new_name:
__UpperCAmelCase : Optional[int] = new_name.replace("fc", "convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
__UpperCAmelCase : Tuple = new_name.replace("norm1", "batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
__UpperCAmelCase : Union[str, Any] = new_name.replace("norm2", "batchnorm_after" )
if "proj" in new_name:
__UpperCAmelCase : Tuple = new_name.replace("proj", "projection" )
if "dist_head" in new_name:
__UpperCAmelCase : List[Any] = new_name.replace("dist_head", "distillation_classifier" )
elif "head" in new_name:
__UpperCAmelCase : Dict = new_name.replace("head", "classifier" )
elif "patch_embed" in new_name:
__UpperCAmelCase : Tuple = 'efficientformer.' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
__UpperCAmelCase : int = new_name.replace("norm", "layernorm" )
__UpperCAmelCase : Any = 'efficientformer.' + new_name
else:
__UpperCAmelCase : Optional[int] = 'efficientformer.encoder.' + new_name
return new_name
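# Hedged illustration of the intended mapping above (key invented for the
# example): "patch_embed.0.weight" is routed to
# "efficientformer.patch_embed.convolution1.weight" on the Hugging Face side.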
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase ):
for key in checkpoint.copy().keys():
__UpperCAmelCase : List[str] = checkpoint.pop(lowerCAmelCase_ )
__UpperCAmelCase : List[str] = val
return checkpoint
def __UpperCamelCase ( ):
__UpperCAmelCase : int = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__UpperCAmelCase : List[str] = Image.open(requests.get(lowerCAmelCase_, stream=lowerCAmelCase_ ).raw )
return image
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
__UpperCAmelCase : int = torch.load(lowerCAmelCase_, map_location="cpu" )['model']
__UpperCAmelCase : Any = EfficientFormerConfig.from_json_file(lowerCAmelCase_ )
__UpperCAmelCase : Optional[int] = EfficientFormerForImageClassificationWithTeacher(lowerCAmelCase_ )
__UpperCAmelCase : List[Any] = '_'.join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
__UpperCAmelCase : Dict = config.depths[-1] - config.num_metaad_blocks + 1
__UpperCAmelCase : int = convert_torch_checkpoint(lowerCAmelCase_, lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ )
model.eval()
__UpperCAmelCase : Any = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
# prepare image
__UpperCAmelCase : Any = prepare_img()
__UpperCAmelCase : Dict = 256
__UpperCAmelCase : Dict = 224
__UpperCAmelCase : int = EfficientFormerImageProcessor(
size={"shortest_edge": image_size}, crop_size={"height": crop_size, "width": crop_size}, resample=pillow_resamplings["bicubic"], )
__UpperCAmelCase : Optional[int] = processor(images=lowerCAmelCase_, return_tensors="pt" ).pixel_values
# original processing pipeline
__UpperCAmelCase : str = Compose(
[
Resize(lowerCAmelCase_, interpolation=pillow_resamplings["bicubic"] ),
CenterCrop(lowerCAmelCase_ ),
ToTensor(),
Normalize(lowerCAmelCase_, lowerCAmelCase_ ),
] )
__UpperCAmelCase : Any = image_transforms(lowerCAmelCase_ ).unsqueeze(0 )
assert torch.allclose(lowerCAmelCase_, lowerCAmelCase_ )
__UpperCAmelCase : Tuple = model(lowerCAmelCase_ )
__UpperCAmelCase : Tuple = outputs.logits
__UpperCAmelCase : List[Any] = (1, 1000)
if "l1" in model_name:
__UpperCAmelCase : Optional[Any] = torch.Tensor(
[-0.1_312, 0.4_353, -1.0_499, -0.5_124, 0.4_183, -0.6_793, -1.3_777, -0.0_893, -0.7_358, -2.4_328] )
assert torch.allclose(logits[0, :10], lowerCAmelCase_, atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
__UpperCAmelCase : List[Any] = torch.Tensor(
[-1.3_150, -1.5_456, -1.2_556, -0.8_496, -0.7_127, -0.7_897, -0.9_728, -0.3_052, 0.3_751, -0.3_127] )
assert torch.allclose(logits[0, :10], lowerCAmelCase_, atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
__UpperCAmelCase : Tuple = torch.Tensor(
[-1.0_283, -1.4_131, -0.5_644, -1.3_115, -0.5_785, -1.2_049, -0.7_528, 0.1_992, -0.3_822, -0.0_878] )
assert logits.shape == expected_shape
else:
raise ValueError(
F"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7" )
# Save Checkpoints
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
print(F"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}" )
processor.save_pretrained(lowerCAmelCase_ )
print(F"Processor successfuly saved at {pytorch_dump_path}" )
if push_to_hub:
print("Pushing model to the hub..." )
model.push_to_hub(
repo_id=F"Bearnardd/{pytorch_dump_path}", commit_message="Add model", use_temp_dir=lowerCAmelCase_, )
processor.push_to_hub(
repo_id=F"Bearnardd/{pytorch_dump_path}", commit_message="Add image processor", use_temp_dir=lowerCAmelCase_, )
if __name__ == "__main__":
lowerCAmelCase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
lowerCAmelCase__ : int = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 368 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowerCAmelCase__ : Optional[Any] = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def __UpperCamelCase ( _UpperCAmelCase ):
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
__UpperCAmelCase : List[str] = list(s_dict.keys() )
for key in keys:
__UpperCAmelCase : int = R".*/layers_(\d+)"
__UpperCAmelCase : List[str] = key
if re.match(_UpperCAmelCase, _UpperCAmelCase ):
__UpperCAmelCase : Optional[int] = re.sub(R"layers_(\d+)", R"block/\1/layer", _UpperCAmelCase )
__UpperCAmelCase : Any = R"(encoder|decoder)\/"
if re.match(_UpperCAmelCase, _UpperCAmelCase ):
__UpperCAmelCase : List[Any] = re.match(_UpperCAmelCase, _UpperCAmelCase ).groups()
if groups[0] == "encoder":
__UpperCAmelCase : Optional[Any] = re.sub(R"/mlp/", R"/1/mlp/", _UpperCAmelCase )
__UpperCAmelCase : List[Any] = re.sub(R"/pre_mlp_layer_norm/", R"/1/layer_norm/", _UpperCAmelCase )
elif groups[0] == "decoder":
__UpperCAmelCase : List[Any] = re.sub(R"/mlp/", R"/2/mlp/", _UpperCAmelCase )
__UpperCAmelCase : Any = re.sub(R"/pre_mlp_layer_norm/", R"/2/layer_norm/", _UpperCAmelCase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
__UpperCAmelCase : List[str] = new_key.replace(_UpperCAmelCase, _UpperCAmelCase )
print(F"{key} -> {new_key}" )
__UpperCAmelCase : Any = s_dict.pop(_UpperCAmelCase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__UpperCAmelCase : Tuple = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__UpperCAmelCase : Optional[Any] = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
__UpperCAmelCase : Any = s_dict[key].shape[0]
__UpperCAmelCase : str = s_dict[key]
for idx in range(_UpperCAmelCase ):
__UpperCAmelCase : Optional[Any] = expert_weihts[idx]
print(F"{key} -> {key.replace('expert/', 'nested fstring' )}" )
s_dict.pop(_UpperCAmelCase )
return s_dict
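# Hedged illustration of the renaming above (key invented for the example):
#   "encoder/layers_0/attention/query/kernel"
#       -> "encoder/block/0/layer/0/SelfAttention/q/kernel"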
lowerCAmelCase__ : Optional[Any] = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase ):
# Convert a google style config to the hugging face fromat
import regex as re
with open(_UpperCAmelCase, "r" ) as f:
__UpperCAmelCase : List[Any] = f.read()
__UpperCAmelCase : Union[str, Any] = re.findall(R"(.*) = ([0-9.]*)", _UpperCAmelCase )
__UpperCAmelCase : Dict = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
__UpperCAmelCase : Tuple = float(_UpperCAmelCase ) if "." in value else int(_UpperCAmelCase )
__UpperCAmelCase : str = re.findall(R"(.*activations) = \(\'(.*)\',\)", _UpperCAmelCase )[0]
__UpperCAmelCase : int = str(activation[1] )
__UpperCAmelCase : int = num_experts
__UpperCAmelCase : List[str] = SwitchTransformersConfig(**_UpperCAmelCase )
return config
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None, _UpperCAmelCase="./", _UpperCAmelCase=8 ):
# Initialise PyTorch model
print(F"Loading flax weights from : {flax_checkpoint_path}" )
__UpperCAmelCase : Dict = checkpoints.load_tax_checkpoint(_UpperCAmelCase )
if gin_file is not None:
__UpperCAmelCase : int = convert_gin_to_config(_UpperCAmelCase, _UpperCAmelCase )
else:
__UpperCAmelCase : int = SwitchTransformersConfig.from_pretrained(_UpperCAmelCase )
__UpperCAmelCase : Any = SwitchTransformersForConditionalGeneration(_UpperCAmelCase )
__UpperCAmelCase : str = flax_params["target"]
__UpperCAmelCase : Any = flatten_dict(_UpperCAmelCase, sep="/" )
__UpperCAmelCase : Optional[Any] = rename_keys(_UpperCAmelCase )
__UpperCAmelCase : Any = unflatten_dict(_UpperCAmelCase, sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(_UpperCAmelCase, _UpperCAmelCase )
print(F"Save PyTorch model to {pytorch_dump_path}" )
pt_model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
lowerCAmelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
lowerCAmelCase__ : int = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 37 | 0 |
def binary_exponentiation ( a , n , mod ):
    '''simple docstring'''
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a , n - 1 , mod ) * a) % mod
    else:
        b = binary_exponentiation(a , n // 2 , mod )
        return (b * b) % mod
# a prime number
p = 701
a = 10_0000_0000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
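# Hedged sanity check (values invented): 2**10 % 1000 == 24, so the recursive
# routine should agree with Python's built-in three-argument pow().
assert binary_exponentiation(2 , 10 , 1000 ) == pow(2 , 10 , 1000 ) == 24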
| 36 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
try:
with open(UpperCamelCase , """rb""" ) as flax_state_f:
lowerCAmelCase__ : Union[str, Any] = from_bytes(UpperCamelCase , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(UpperCamelCase ) as f:
if f.read().startswith("""version""" ):
raise OSError(
"""You seem to have cloned a repository without having git-lfs installed. Please"""
""" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
""" folder you cloned.""" )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(f"""Unable to convert {model_file} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(UpperCamelCase , UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
lowerCAmelCase__ : str = flatten_dict(jax.tree_util.tree_map(lambda UpperCamelCase : x.dtype == jnp.bfloataa , UpperCamelCase ) ).values()
if any(UpperCamelCase ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
lowerCAmelCase__ : Dict = jax.tree_util.tree_map(
lambda UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , UpperCamelCase )
lowerCAmelCase__ : Any = """"""
lowerCAmelCase__ : Any = flatten_dict(UpperCamelCase , sep=""".""" )
lowerCAmelCase__ : Optional[int] = pt_model.state_dict()
# keep track of unexpected & missing keys
lowerCAmelCase__ : Optional[Any] = []
lowerCAmelCase__ : int = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowerCAmelCase__ : Union[str, Any] = flax_key_tuple.split(""".""" )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
lowerCAmelCase__ : Optional[int] = flax_key_tuple_array[:-1] + ["""weight"""]
lowerCAmelCase__ : Any = jnp.transpose(UpperCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
lowerCAmelCase__ : str = flax_key_tuple_array[:-1] + ["""weight"""]
lowerCAmelCase__ : Any = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
lowerCAmelCase__ : int = flax_key_tuple_array[:-1] + ["""weight"""]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(UpperCamelCase ):
lowerCAmelCase__ : List[str] = (
flax_key_tuple_string.replace("""_0""" , """.0""" )
.replace("""_1""" , """.1""" )
.replace("""_2""" , """.2""" )
.replace("""_3""" , """.3""" )
.replace("""_4""" , """.4""" )
.replace("""_5""" , """.5""" )
.replace("""_6""" , """.6""" )
.replace("""_7""" , """.7""" )
.replace("""_8""" , """.8""" )
.replace("""_9""" , """.9""" )
)
lowerCAmelCase__ : Union[str, Any] = """.""".join(UpperCamelCase )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
lowerCAmelCase__ : int = np.asarray(UpperCamelCase ) if not isinstance(UpperCamelCase , np.ndarray ) else flax_tensor
lowerCAmelCase__ : int = torch.from_numpy(UpperCamelCase )
# remove from missing keys
missing_keys.remove(UpperCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(UpperCamelCase )
pt_model.load_state_dict(UpperCamelCase )
# re-transform missing_keys to list
lowerCAmelCase__ : Optional[int] = list(UpperCamelCase )
if len(UpperCamelCase ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
if len(UpperCamelCase ) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
""" use it for predictions and inference.""" )
return pt_model
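# Hedged illustration of the key translation above (key invented for the
# example): a 2-D "dense.kernel" from Flax becomes "dense.weight" on the
# PyTorch side, holding the transposed matrix.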
| 37 | 0 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
_SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
def __init__( self : List[str] , *__lowerCamelCase : str , **__lowerCamelCase : int ) -> None:
warnings.warn(
'''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ImageGPTImageProcessor instead.''' , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
| 218 |
from functools import reduce
_SCREAMING_SNAKE_CASE : Any = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution ( n = N ):
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
if __name__ == "__main__":
print(F"{solution() = }")
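    # Known result for this 1000-digit constant (Project Euler problem 8):
    # the maximum product of 13 adjacent digits is 23514624000.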
| 218 | 1 |
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
lowerCAmelCase_ = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowerCAmelCase_ = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
lowerCAmelCase_ = 'zero2'
lowerCAmelCase_ = 'zero3'
lowerCAmelCase_ = [ZEROa, ZEROa]
def __UpperCAmelCase ( func , param_num , param ) -> Union[str, Any]:
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('''_'''.join(str(x ) for x in param.args ) )
    return f"""{func.__name__}_{param_based_name}"""
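# Hedged example (argument values invented): for param.args == ("zero2", "base")
# the helper above produces the sub-test suffix "zero2_base".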
# Cartesian-product of zero stages with models to test
lowerCAmelCase_ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class __A ( A_ ):
'''simple docstring'''
@parameterized.expand(_snake_case ,name_func=_snake_case )
def UpperCAmelCase ( self : str ,_snake_case : Tuple ,_snake_case : str ) -> Union[str, Any]:
"""simple docstring"""
self.run_and_check(
stage=_snake_case ,model=_snake_case ,distributed=_snake_case ,fpaa=_snake_case ,)
@require_torch_multi_gpu
@parameterized.expand(_snake_case ,name_func=_snake_case )
def UpperCAmelCase ( self : int ,_snake_case : Optional[int] ,_snake_case : Dict ) -> List[str]:
"""simple docstring"""
self.run_and_check(
stage=_snake_case ,model=_snake_case ,distributed=_snake_case ,fpaa=_snake_case ,)
@parameterized.expand(_snake_case ,name_func=_snake_case )
def UpperCAmelCase ( self : int ,_snake_case : str ,_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.run_and_check(
stage=_snake_case ,model=_snake_case ,distributed=_snake_case ,fpaa=_snake_case ,)
@require_torch_multi_gpu
@parameterized.expand(_snake_case ,name_func=_snake_case )
def UpperCAmelCase ( self : List[str] ,_snake_case : str ,_snake_case : str ) -> Optional[Any]:
"""simple docstring"""
self.run_and_check(
stage=_snake_case ,model=_snake_case ,distributed=_snake_case ,fpaa=_snake_case ,)
def UpperCAmelCase ( self : List[Any] ,_snake_case : Any ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase ( self : Tuple ,_snake_case : str ,_snake_case : str ,_snake_case : int = 10 ,_snake_case : bool = True ,_snake_case : bool = True ,_snake_case : bool = True ,) -> Any:
"""simple docstring"""
lowercase__ : Optional[int] = models[model]
lowercase__ : Optional[int] = self.run_trainer(
stage=_snake_case ,model_name=_snake_case ,eval_steps=_snake_case ,num_train_epochs=1 ,distributed=_snake_case ,fpaa=_snake_case ,)
self.do_checks(_snake_case )
return output_dir
def UpperCAmelCase ( self : Any ,_snake_case : str ,_snake_case : str ,_snake_case : int = 10 ,_snake_case : int = 1 ,_snake_case : bool = True ,_snake_case : bool = True ,) -> List[str]:
"""simple docstring"""
lowercase__ : Optional[int] = self.get_auto_remove_tmp_dir('''./xxx''' ,after=_snake_case )
lowercase__ : Union[str, Any] = f"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(_snake_case )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
""".split()
if fpaa:
args.extend(['''--fp16'''] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
lowercase__ : Any = f"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
lowercase__ : Optional[int] = [f"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
lowercase__ : int = self.get_launcher(_snake_case )
lowercase__ : List[str] = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_snake_case ,env=self.get_env() )
return output_dir
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : List[str]=False ) -> Any:
"""simple docstring"""
lowercase__ : Optional[int] = min(2 ,get_gpu_count() ) if distributed else 1
return f"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
| 16 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
debug_launcher(test_script.main )
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
debug_launcher(test_ops.main )
| 16 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
a : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class a ( _lowerCamelCase ):
def __init__( self : List[Any] , lowercase_ : Any , lowercase_ : Any ):
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
@torch.no_grad()
def __call__( self : Tuple , lowercase_ : int = 1 , lowercase_ : int = 100 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[float] = None , lowercase_ : bool = True , ):
if audio_length_in_s is None:
snake_case_ = self.unet.config.sample_size / self.unet.config.sample_rate
snake_case_ = audio_length_in_s * self.unet.config.sample_rate
snake_case_ = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
F"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
F" {3 * down_scale_factor / self.unet.config.sample_rate}." )
snake_case_ = int(lowercase_ )
if sample_size % down_scale_factor != 0:
snake_case_ = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
F" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
''' process.''' )
snake_case_ = int(lowercase_ )
snake_case_ = next(iter(self.unet.parameters() ) ).dtype
snake_case_ = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
snake_case_ = randn_tensor(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ )
# set step values
self.scheduler.set_timesteps(lowercase_ , device=audio.device )
snake_case_ = self.scheduler.timesteps.to(lowercase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
snake_case_ = self.unet(lowercase_ , lowercase_ ).sample
# 2. compute previous image: x_t -> t_t-1
snake_case_ = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
snake_case_ = audio.clamp(-1 , 1 ).float().cpu().numpy()
snake_case_ = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowercase_ )
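# Hedged usage sketch (the checkpoint id below is an assumption, not taken from
# this file; upstream this is the DanceDiffusion-style unconditional pipeline):
#   pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   audio = pipe(audio_length_in_s=4.0 , num_inference_steps=100 ).audios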
| 371 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class a :
def __init__( self : str , lowercase_ : Optional[Any] , lowercase_ : Optional[Any]=13 , lowercase_ : int=64 , lowercase_ : Tuple=2 , lowercase_ : List[str]=3 , lowercase_ : str=True , lowercase_ : Dict=True , lowercase_ : int=32 , lowercase_ : int=5 , lowercase_ : Optional[Any]=4 , lowercase_ : Optional[Any]=37 , lowercase_ : List[Any]="gelu" , lowercase_ : Tuple=0.1 , lowercase_ : str=0.1 , lowercase_ : Any=10 , lowercase_ : List[str]=0.02 , lowercase_ : Tuple=[1, 16, 4, 4] , lowercase_ : Tuple=None , ):
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = scope
snake_case_ = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
snake_case_ = (self.image_size // 32) ** 2
snake_case_ = num_patches + 1
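        # e.g. with the default image_size=64: (64 // 32) ** 2 = 4 patches, so the
        # sequence length is 5 once the [CLS] token is counted.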
def A_ ( self : List[Any] ):
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = self.get_config()
return config, pixel_values, labels
def A_ ( self : Any ):
snake_case_ = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 16, 32],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=lowercase_ , )
def A_ ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : int ):
snake_case_ = ViTHybridModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self : List[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Optional[int] ):
snake_case_ = self.type_sequence_label_size
snake_case_ = ViTHybridForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A_ ( self : List[Any] ):
snake_case_ = self.prepare_config_and_inputs()
snake_case_ ,snake_case_ ,snake_case_ = config_and_inputs
snake_case_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
snake_case_ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
snake_case_ = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
def A_ ( self : Optional[Any] ):
snake_case_ = ViTHybridModelTester(self )
snake_case_ = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def A_ ( self : Optional[int] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def A_ ( self : Any ):
pass
def A_ ( self : Dict ):
snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
def A_ ( self : Dict ):
snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(lowercase_ )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_ )
def A_ ( self : Tuple ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def A_ ( self : List[Any] ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
def A_ ( self : Optional[Any] ):
snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = _config_zero_init(lowercase_ )
for model_class in self.all_model_classes:
snake_case_ = model_class(config=lowercase_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
snake_case_ = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@slow
def A_ ( self : Tuple ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = ViTHybridModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def __magic_name__ ( ) -> List[Any]:
'''simple docstring'''
snake_case_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def A_ ( self : Any ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A_ ( self : List[str] ):
snake_case_ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
lowercase_ )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=lowercase_ , return_tensors='''pt''' ).to(lowercase_ )
# forward pass
with torch.no_grad():
snake_case_ = model(**lowercase_ )
# verify the logits
snake_case_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
snake_case_ = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4 ) )
@slow
@require_accelerate
def A_ ( self : Dict ):
snake_case_ = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
snake_case_ = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''' )
snake_case_ = prepare_img()
snake_case_ = image_processor(images=lowercase_ , return_tensors='''pt''' )
snake_case_ = model(**lowercase_ )
snake_case_ = outputs.logits
# model predicts one of the 1000 ImageNet classes
snake_case_ = logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx] , '''tabby, tabby cat''' )
| 72 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _snake_case ( _snake_case , _snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = StableDiffusionDiffEditPipeline
SCREAMING_SNAKE_CASE__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
SCREAMING_SNAKE_CASE__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
SCREAMING_SNAKE_CASE__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE__ = frozenset([] )
def SCREAMING_SNAKE_CASE__ ( self ):
torch.manual_seed(0 )
a :Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_lowerCamelCase , )
a :int = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_lowerCamelCase , set_alpha_to_one=_lowerCamelCase , )
a :Tuple = DDIMInverseScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_lowerCamelCase , set_alpha_to_zero=_lowerCamelCase , )
torch.manual_seed(0 )
a :Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
a :int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
a :Optional[int] = CLIPTextModel(_lowerCamelCase )
a :str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
a :Optional[Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase=0 ):
a :Optional[Any] = floats_tensor((1, 16, 16) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
a :Tuple = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
if str(_lowerCamelCase ).startswith('''mps''' ):
a :int = torch.manual_seed(_lowerCamelCase )
else:
a :int = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
a :Dict = {
'''prompt''': '''a dog and a newt''',
'''mask_image''': mask,
'''image_latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase=0 ):
a :str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
a :str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
a :Optional[Any] = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' )
if str(_lowerCamelCase ).startswith('''mps''' ):
a :Dict = torch.manual_seed(_lowerCamelCase )
else:
a :Any = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
a :Union[str, Any] = {
'''image''': image,
'''source_prompt''': '''a cat and a frog''',
'''target_prompt''': '''a dog and a newt''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''num_maps_per_mask''': 2,
'''mask_encode_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase=0 ):
a :List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
a :int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
a :Optional[Any] = Image.fromarray(np.uint8(_lowerCamelCase ) ).convert('''RGB''' )
if str(_lowerCamelCase ).startswith('''mps''' ):
a :Dict = torch.manual_seed(_lowerCamelCase )
else:
a :Any = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
a :List[Any] = {
'''image''': image,
'''prompt''': '''a cat and a frog''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''decode_latents''': True,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self ):
if not hasattr(self.pipeline_class , '''_optional_components''' ):
return
a :Any = self.get_dummy_components()
a :str = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
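# register_modules records the None values in the pipeline config, so the optional
# components should remain None after the save_pretrained/from_pretrained round trip
# asserted below.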
a :Union[str, Any] = self.get_dummy_inputs(_lowerCamelCase )
a :List[Any] = pipe(**_lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowerCamelCase )
a :int = self.pipeline_class.from_pretrained(_lowerCamelCase )
pipe_loaded.to(_lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=_lowerCamelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_lowerCamelCase , _lowerCamelCase ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
a :Dict = self.get_dummy_inputs(_lowerCamelCase )
a :Tuple = pipe_loaded(**_lowerCamelCase )[0]
a :List[str] = np.abs(output - output_loaded ).max()
self.assertLess(_lowerCamelCase , 1e-4 )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Any = '''cpu'''
a :Optional[int] = self.get_dummy_components()
a :List[str] = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
a :Any = self.get_dummy_mask_inputs(_lowerCamelCase )
a :str = pipe.generate_mask(**_lowerCamelCase )
a :List[str] = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
a :List[Any] = np.array([0] * 9 )
a :str = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCamelCase , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Any = '''cpu'''
a :List[str] = self.get_dummy_components()
a :List[Any] = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
a :str = self.get_dummy_inversion_inputs(_lowerCamelCase )
a :Any = pipe.invert(**_lowerCamelCase ).images
a :Union[str, Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
a :str = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799] , )
a :str = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCamelCase , 1e-3 )
def SCREAMING_SNAKE_CASE__ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def SCREAMING_SNAKE_CASE__ ( self ):
a :str = '''cpu'''
a :str = self.get_dummy_components()
a :Union[str, Any] = {'''beta_start''': 0.0_0085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''}
a :Dict = DPMSolverMultistepScheduler(**_lowerCamelCase )
a :List[str] = DPMSolverMultistepInverseScheduler(**_lowerCamelCase )
a :int = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
a :str = self.get_dummy_inversion_inputs(_lowerCamelCase )
a :Tuple = pipe.invert(**_lowerCamelCase ).images
a :Any = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
a :int = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799] , )
a :Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCamelCase , 1e-3 )
@require_torch_gpu
@slow
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls ):
a :Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' )
a :Union[str, Any] = raw_image.convert('''RGB''' ).resize((768, 768) )
a :List[Any] = raw_image
def SCREAMING_SNAKE_CASE__ ( self ):
a :Tuple = torch.manual_seed(0 )
a :int = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=_lowerCamelCase , torch_dtype=torch.float16 )
a :Optional[int] = DDIMScheduler.from_config(pipe.scheduler.config )
a :Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_lowerCamelCase )
a :Optional[Any] = '''a bowl of fruit'''
a :Any = '''a bowl of pears'''
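# The DiffEdit flow has three stages: generate an edit mask from the source/target
# prompt pair, invert the image into latents, then denoise with the mask applied.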
a :Any = pipe.generate_mask(
image=self.raw_image , source_prompt=_lowerCamelCase , target_prompt=_lowerCamelCase , generator=_lowerCamelCase , )
a :Dict = pipe.invert(
prompt=_lowerCamelCase , image=self.raw_image , inpaint_strength=0.7 , generator=_lowerCamelCase ).latents
a :List[str] = pipe(
prompt=_lowerCamelCase , mask_image=_lowerCamelCase , image_latents=_lowerCamelCase , generator=_lowerCamelCase , negative_prompt=_lowerCamelCase , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0]
a :List[str] = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[int] = torch.manual_seed(0 )
a :List[Any] = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=_lowerCamelCase , torch_dtype=torch.float16 )
a :Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
a :Union[str, Any] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_lowerCamelCase )
a :Dict = '''a bowl of fruit'''
a :Optional[Any] = '''a bowl of pears'''
a :Tuple = pipe.generate_mask(
image=self.raw_image , source_prompt=_lowerCamelCase , target_prompt=_lowerCamelCase , generator=_lowerCamelCase , )
a :Dict = pipe.invert(
prompt=_lowerCamelCase , image=self.raw_image , inpaint_strength=0.7 , generator=_lowerCamelCase , num_inference_steps=25 , ).latents
a :str = pipe(
prompt=_lowerCamelCase , mask_image=_lowerCamelCase , image_latents=_lowerCamelCase , generator=_lowerCamelCase , negative_prompt=_lowerCamelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0]
a :List[Any] = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 94 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase ( UpperCamelCase__ , unittest.TestCase ):
__lowercase = KandinskyV22Pipeline
__lowercase = [
"""image_embeds""",
"""negative_image_embeds""",
]
__lowercase = ["""image_embeds""", """negative_image_embeds"""]
__lowercase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__lowercase = False
@property
def UpperCAmelCase_ ( self :Optional[Any] )-> str:
return 32
@property
def UpperCAmelCase_ ( self :int )-> List[Any]:
return 32
@property
def UpperCAmelCase_ ( self :Optional[Any] )-> Dict:
return self.time_input_dim
@property
def UpperCAmelCase_ ( self :Any )-> Union[str, Any]:
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self :Any )-> List[Any]:
return 1_00
@property
def UpperCAmelCase_ ( self :Union[str, Any] )-> Union[str, Any]:
torch.manual_seed(0 )
A__ = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
A__ = UNet2DConditionModel(**lowercase_ )
return model
@property
def UpperCAmelCase_ ( self :Any )-> Optional[Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase_ ( self :Dict )-> Union[str, Any]:
torch.manual_seed(0 )
A__ = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase_ ( self :Dict )-> Optional[Any]:
A__ = self.dummy_unet
A__ = self.dummy_movq
A__ = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , steps_offset=1 , prediction_type="epsilon" , thresholding=lowercase_ , )
A__ = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def UpperCAmelCase_ ( self :Dict , lowercase_ :int , lowercase_ :Union[str, Any]=0 )-> Dict:
A__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
A__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowercase_ )
if str(lowercase_ ).startswith("mps" ):
A__ = torch.manual_seed(lowercase_ )
else:
A__ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
A__ = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def UpperCAmelCase_ ( self :Optional[Any] )-> str:
A__ = "cpu"
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**lowercase_ )
A__ = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
A__ = pipe(**self.get_dummy_inputs(lowercase_ ) )
A__ = output.images
A__ = pipe(
**self.get_dummy_inputs(lowercase_ ) , return_dict=lowercase_ , )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A__ = np.array(
[0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
def UpperCAmelCase_ ( self :Dict )-> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self :Any )-> List[Any]:
A__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" )
A__ = KandinskyV22PriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.float16 )
pipe_prior.to(lowercase_ )
A__ = KandinskyV22Pipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.float16 )
A__ = pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
A__ = "red cat, 4k photo"
A__ = torch.Generator(device="cuda" ).manual_seed(0 )
A__, A__ = pipe_prior(
lowercase_ , generator=lowercase_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
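# The prior maps the text prompt to CLIP image embeddings (positive and negative);
# the decoder below conditions on those embeddings rather than on raw text.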
A__ = torch.Generator(device="cuda" ).manual_seed(0 )
A__ = pipeline(
image_embeds=lowercase_ , negative_image_embeds=lowercase_ , generator=lowercase_ , num_inference_steps=1_00 , output_type="np" , )
A__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ )
| 237 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
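# Standard transformers lazy-import layout: type checkers see the real imports below,
# while at runtime a _LazyModule defers loading the torch-heavy modeling code until
# an attribute is first accessed.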
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 351 |
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
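# Converts "diffuser" (Planning with Diffusion, Janner et al.) hopper-medium-v2 checkpoints
# into diffusers UNet1DModel weights. Note the positional zip of state-dict keys below
# assumes both models enumerate their parameters in the same order.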
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65_536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    # Remap checkpoint keys onto the diffusers parameter names by position.
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65_536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model  # this checkpoint stores the state dict directly
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
| 42 | 0 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase( lowerCamelCase , unittest.TestCase ):
lowercase__ = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def UpperCAmelCase ( self , __a=0) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = np.random.RandomState(__a)
_UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**__a).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_UpperCamelCase = np.array([0.6_5072, 0.5_8492, 0.4_8219, 0.5_5521, 0.5_3180, 0.5_5939, 0.5_0697, 0.3_9800, 0.4_6455])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
_UpperCamelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__a)
pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**__a).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_UpperCamelCase = np.array([0.6_5863, 0.5_9425, 0.4_9326, 0.5_6313, 0.5_3875, 0.5_6627, 0.5_1065, 0.3_9777, 0.4_6330])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
_UpperCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**__a).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_UpperCamelCase = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
_UpperCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**__a).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_UpperCamelCase = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
_UpperCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**__a).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_UpperCamelCase = np.array([0.5_3817, 0.6_0812, 0.4_7384, 0.4_9530, 0.5_1894, 0.4_9814, 0.4_7984, 0.3_8958, 0.4_4271])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
_UpperCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**__a).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_UpperCamelCase = np.array([0.5_3895, 0.6_0808, 0.4_7933, 0.4_9608, 0.5_1886, 0.4_9950, 0.4_8053, 0.3_8957, 0.4_4200])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = 3 * [inputs['''prompt''']]
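# Baseline pass with plain text prompts; the second pass below feeds precomputed
# prompt embeddings and must reproduce the same images.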
# forward
_UpperCamelCase = pipe(**__a)
_UpperCamelCase = output.images[0, -3:, -3:, -1]
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = 3 * [inputs.pop('''prompt''')]
_UpperCamelCase = pipe.tokenizer(
__a , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=__a , return_tensors='''np''' , )
_UpperCamelCase = text_inputs['''input_ids''']
_UpperCamelCase = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]
_UpperCamelCase = prompt_embeds
# forward
_UpperCamelCase = pipe(**__a)
_UpperCamelCase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1e-4
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = 3 * ['''this is a negative prompt''']
_UpperCamelCase = negative_prompt
_UpperCamelCase = 3 * [inputs['''prompt''']]
# forward
_UpperCamelCase = pipe(**__a)
_UpperCamelCase = output.images[0, -3:, -3:, -1]
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = 3 * [inputs.pop('''prompt''')]
_UpperCamelCase = []
for p in [prompt, negative_prompt]:
_UpperCamelCase = pipe.tokenizer(
__a , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=__a , return_tensors='''np''' , )
_UpperCamelCase = text_inputs['''input_ids''']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])
_UpperCamelCase , _UpperCamelCase = embeds
# forward
_UpperCamelCase = pipe(**__a)
_UpperCamelCase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase( unittest.TestCase ):
@property
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = ort.SessionOptions()
_UpperCamelCase = False
return options
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
# using the PNDM scheduler by default
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = '''A painting of a squirrel eating a burger'''
np.random.seed(0)
_UpperCamelCase = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type='''np''')
_UpperCamelCase = output.images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCamelCase = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = DDIMScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''')
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=__a , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = '''open neural network exchange'''
_UpperCamelCase = np.random.RandomState(0)
_UpperCamelCase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=__a , output_type='''np''')
_UpperCamelCase = output.images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCamelCase = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''')
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=__a , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = '''open neural network exchange'''
_UpperCamelCase = np.random.RandomState(0)
_UpperCamelCase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=__a , output_type='''np''')
_UpperCamelCase = output.images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCamelCase = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = 0
def test_callback_fn(__a , __a , __a) -> None:
_UpperCamelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
_UpperCamelCase = latents[0, -3:, -3:, -1]
_UpperCamelCase = np.array(
[-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
_UpperCamelCase = latents[0, -3:, -3:, -1]
_UpperCamelCase = np.array(
[-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
_UpperCamelCase = False
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = '''Andromeda galaxy in a bottle'''
_UpperCamelCase = np.random.RandomState(0)
pipe(
prompt=__a , num_inference_steps=5 , guidance_scale=7.5 , generator=__a , callback=__a , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(__a , __a)
assert pipe.safety_checker is None
_UpperCamelCase = pipe('''example prompt''' , num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__a)
_UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(__a)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_UpperCamelCase = pipe('''example prompt''' , num_inference_steps=2).images[0]
assert image is not None
| 194 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
_a = logging.get_logger(__name__)
class _UpperCAmelCase( lowerCamelCase ):
def UpperCAmelCase ( self , __a) -> Tuple:
'''simple docstring'''
if isinstance(__a , __a):
_UpperCamelCase = [label.strip() for label in labels.split(''',''') if label.strip()]
return labels
def __call__( self , __a , __a , __a) -> Optional[int]:
'''simple docstring'''
if len(__a) == 0 or len(__a) == 0:
raise ValueError('''You must include at least one label and at least one sequence.''')
if hypothesis_template.format(labels[0]) == hypothesis_template:
raise ValueError(
(
'''The provided hypothesis_template "{}" was not able to be formatted with the target labels. '''
'''Make sure the passed template includes formatting syntax such as {{}} where the label should go.'''
).format(__a))
if isinstance(__a , __a):
_UpperCamelCase = [sequences]
_UpperCamelCase = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(__a)] for label in labels])
return sequence_pairs, sequences
@add_end_docstrings(lowerCamelCase )
class _UpperCAmelCase( lowerCamelCase ):
def __init__( self , __a=ZeroShotClassificationArgumentHandler() , *__a , **__a) -> List[str]:
'''simple docstring'''
_UpperCamelCase = args_parser
super().__init__(*__a , **__a)
if self.entailment_id == -1:
logger.warning(
'''Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '''
'''-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.''')
@property
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
for label, ind in self.model.config.label2id.items():
if label.lower().startswith('''entail'''):
return ind
return -1
def UpperCAmelCase ( self , __a , __a=True , __a=True , __a=TruncationStrategy.ONLY_FIRST , **__a) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
'''Tokenizer was not supporting padding necessary for zero-shot, attempting to use '''
''' `pad_token=eos_token`''')
_UpperCamelCase = self.tokenizer.eos_token
try:
_UpperCamelCase = self.tokenizer(
__a , add_special_tokens=__a , return_tensors=__a , padding=__a , truncation=__a , )
except Exception as e:
if "too short" in str(__a):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
_UpperCamelCase = self.tokenizer(
__a , add_special_tokens=__a , return_tensors=__a , padding=__a , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def UpperCAmelCase ( self , **__a) -> Any:
'''simple docstring'''
if kwargs.get('''multi_class''' , __a) is not None:
_UpperCamelCase = kwargs['''multi_class''']
logger.warning(
'''The `multi_class` argument has been deprecated and renamed to `multi_label`. '''
'''`multi_class` will be removed in a future version of Transformers.''')
_UpperCamelCase = {}
if "candidate_labels" in kwargs:
_UpperCamelCase = self._args_parser._parse_labels(kwargs['''candidate_labels'''])
if "hypothesis_template" in kwargs:
_UpperCamelCase = kwargs['''hypothesis_template''']
_UpperCamelCase = {}
if "multi_label" in kwargs:
_UpperCamelCase = kwargs['''multi_label''']
return preprocess_params, {}, postprocess_params
def __call__( self , __a , *__a , **__a , ) -> int:
'''simple docstring'''
if len(__a) == 0:
pass
elif len(__a) == 1 and "candidate_labels" not in kwargs:
_UpperCamelCase = args[0]
else:
raise ValueError(F'''Unable to understand extra arguments {args}''')
return super().__call__(__a , **__a)
def UpperCAmelCase ( self , __a , __a=None , __a="This example is {}.") -> Dict:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self._args_parser(__a , __a , __a)
for i, (candidate_label, sequence_pair) in enumerate(zip(__a , __a)):
_UpperCamelCase = self._parse_and_tokenize([sequence_pair])
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(__a) - 1,
**model_input,
}
def UpperCAmelCase ( self , __a) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = inputs['''candidate_label''']
_UpperCamelCase = inputs['''sequence''']
_UpperCamelCase = {k: inputs[k] for k in self.tokenizer.model_input_names}
_UpperCamelCase = self.model(**__a)
_UpperCamelCase = {
'''candidate_label''': candidate_label,
'''sequence''': sequence,
'''is_last''': inputs['''is_last'''],
**outputs,
}
return model_outputs
def UpperCAmelCase ( self , __a , __a=False) -> Dict:
'''simple docstring'''
_UpperCamelCase = [outputs['''candidate_label'''] for outputs in model_outputs]
_UpperCamelCase = [outputs['''sequence'''] for outputs in model_outputs]
_UpperCamelCase = np.concatenate([output['''logits'''].numpy() for output in model_outputs])
_UpperCamelCase = logits.shape[0]
_UpperCamelCase = len(__a)
_UpperCamelCase = N // n
_UpperCamelCase = logits.reshape((num_sequences, n, -1))
if multi_label or len(__a) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
_UpperCamelCase = self.entailment_id
_UpperCamelCase = -1 if entailment_id == 0 else 0
_UpperCamelCase = reshaped_outputs[..., [contradiction_id, entailment_id]]
_UpperCamelCase = np.exp(__a) / np.exp(__a).sum(-1 , keepdims=__a)
_UpperCamelCase = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
_UpperCamelCase = reshaped_outputs[..., self.entailment_id]
_UpperCamelCase = np.exp(__a) / np.exp(__a).sum(-1 , keepdims=__a)
_UpperCamelCase = list(reversed(scores[0].argsort()))
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
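# Minimal usage sketch. The checkpoint name is an assumption (any NLI model, e.g.
# "facebook/bart-large-mnli", works) and is not pinned by this file:
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   classifier("The new GPU doubles training throughput.", candidate_labels=["hardware", "cooking"])
#
# Each (sequence, label) pair is scored as a premise/hypothesis entailment problem,
# and the labels come back sorted by descending entailment score.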
| 194 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
UpperCAmelCase : Tuple = None
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : str = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase : int = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
UpperCAmelCase : List[str] = {
'google/rembert': 256,
}
UpperCAmelCase : List[Any] = '▁'
class lowerCamelCase__ ( A ):
"""simple docstring"""
__a = VOCAB_FILES_NAMES
__a = PRETRAINED_VOCAB_FILES_MAP
__a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a = RemBertTokenizer
def __init__( self : Optional[int] , UpperCamelCase : str=None , UpperCamelCase : Tuple=None , UpperCamelCase : Dict=True , UpperCamelCase : List[Any]=True , UpperCamelCase : Optional[Any]=False , UpperCamelCase : Any="[CLS]" , UpperCamelCase : Optional[Any]="[SEP]" , UpperCamelCase : Optional[int]="<unk>" , UpperCamelCase : Any="[SEP]" , UpperCamelCase : Union[str, Any]="<pad>" , UpperCamelCase : Dict="[CLS]" , UpperCamelCase : Union[str, Any]="[MASK]" , **UpperCamelCase : Tuple , ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , remove_space=UpperCamelCase , keep_accents=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , **UpperCamelCase , )
__UpperCAmelCase : Tuple = do_lower_case
__UpperCAmelCase : List[str] = remove_space
__UpperCAmelCase : Dict = keep_accents
__UpperCAmelCase : List[Any] = vocab_file
__UpperCAmelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
__UpperCAmelCase : Tuple = [self.sep_token_id]
__UpperCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
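# Layout: single sequence -> [CLS] X [SEP]; pair of sequences -> [CLS] A [SEP] B [SEP]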
def lowerCamelCase__ ( self : Any , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1]
return [1] + ([0] * len(UpperCamelCase )) + [1]
def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
__UpperCAmelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase__ ( self : List[str] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCamelCase ):
logger.error("""Vocabulary path ({}) should be a directory""".format(UpperCamelCase ) )
return
__UpperCAmelCase : int = os.path.join(
UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ):
copyfile(self.vocab_file , UpperCamelCase )
return (out_vocab_file,)
| 366 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCamelCase__ ( metaclass=A ):
"""simple docstring"""
__a = ["""keras_nlp"""]
def __init__( self : str , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ):
'''simple docstring'''
requires_backends(self , ["""keras_nlp"""] )
| 320 | 0 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    """Unconditional latent diffusion: denoise in VQ-VAE latent space, then decode to pixels."""

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
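# Minimal usage sketch, assuming the public unconditional LDM checkpoint
# "CompVis/ldm-celebahq-256" (the checkpoint name is illustrative, not pinned here):
#
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(num_inference_steps=50).images[0]
#   image.save("ldm_sample.png")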
| 340 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE : Union[str, Any] = 'CLIPImageProcessor'
SCREAMING_SNAKE_CASE : Union[str, Any] = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : List[Any] ,lowercase__ : Dict=None ,lowercase__ : Union[str, Any]=None ,**lowercase__ : Tuple ):
__lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' ,lowercase__ ,)
__lowercase = kwargs.pop('''feature_extractor''' )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowercase__ ,lowercase__ )
def __call__( self : List[Any] ,lowercase__ : str=None ,lowercase__ : List[Any]=None ,lowercase__ : Optional[Any]=None ,**lowercase__ : int ):
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__lowercase = self.tokenizer(lowercase__ ,return_tensors=lowercase__ ,**lowercase__ )
if images is not None:
__lowercase = self.image_processor(lowercase__ ,return_tensors=lowercase__ ,**lowercase__ )
if text is not None and images is not None:
__lowercase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase__ ) ,tensor_type=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,*lowercase__ : List[str] ,**lowercase__ : int ):
return self.tokenizer.batch_decode(*lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,*lowercase__ : Optional[int] ,**lowercase__ : Union[str, Any] ):
return self.tokenizer.decode(*lowercase__ ,**lowercase__ )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.tokenizer.model_input_names
__lowercase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' ,lowercase__ ,)
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' ,lowercase__ ,)
return self.image_processor
| 104 | 0 |
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))
def generate_key(message: str, key: str) -> str:
    """Extend the key by repeating its characters until it matches the message length."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key
def cipher_text(message: str, key_new: str) -> str:
    """Encrypt the message by subtracting key letters modulo 26; spaces pass through."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text
def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt the cipher text by adding the key letters back modulo 26."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt
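# Worked round trip: generate_key("THE GERMAN ATTACK", "SECRET") repeats the key to the
# 17-character message length, giving "SECRETSECRETSECRE"; cipher_text subtracts key
# letters mod 26 and original_text adds them back, recovering the plaintext.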
def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 118 |
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incremental sieve of Eratosthenes: lazily yield primes one at a time."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution(limit: float = 1e10) -> int:
    """Return the least odd index n whose remainder first exceeds the limit."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
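# Project Euler 123: for the n-th prime p and odd n, (p - 1)^n + (p + 1)^n is congruent
# to 2*n*p (mod p^2), so the answer is the first odd n with 2*n*p above the limit.
# Even n leave a remainder of 2, hence the extra next(primes) and the n += 2 step.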
if __name__ == "__main__":
    print(solution())
| 118 | 1 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes remain, any process whose arrival time has passed and which
    # still has remaining execution time is put into ready_process; the shortest
    # process in ready_process, target_process, is executed next.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
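# Note: once a process is selected its full burst time is added and it runs to
# completion, so despite the remaining_time bookkeeping this is the non-preemptive
# shortest-job-first policy rather than true shortest-remaining-time-first.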
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turnaround time of each process (burst time + waiting time)."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
    print("[TEST CASE 01]")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)
    # Printing the result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
| 271 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : Optional[int] ,_a : Optional[Any]=None ,_a : Dict=None ,*_a : int ,**_a : str ):
'''simple docstring'''
super().__init__(*_a ,**_a )
if config is None:
assert isinstance(self.model ,_a ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F""" {self.model.__class__}"""
)
_a : List[Any] = self.model.config
else:
_a : Optional[int] = config
_a : List[str] = data_args
_a : List[Any] = self.config.tgt_vocab_size if isinstance(self.config ,_a ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
' padding..' )
if self.args.label_smoothing == 0:
_a : List[str] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_a : Tuple = label_smoothed_nll_loss
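# With label smoothing enabled the loss is computed from log-softmax outputs via
# label_smoothed_nll_loss; otherwise padded positions are masked out through the
# CrossEntropyLoss ignore_index set above.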
def __lowercase ( self : List[str] ,_a : int ):
'''simple docstring'''
if self.optimizer is None:
_a : Union[str, Any] = ['bias', 'LayerNorm.weight']
_a : Tuple = [
{
'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'weight_decay': self.args.weight_decay,
},
{
'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
_a : Optional[int] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_a : Any = Adafactor
_a : Dict = {'scale_parameter': False, 'relative_step': False}
else:
_a : Union[str, Any] = AdamW
_a : str = {
'betas': (self.args.adam_beta1, self.args.adam_beta2),
'eps': self.args.adam_epsilon,
}
_a : Union[str, Any] = self.args.learning_rate
if self.sharded_ddp:
_a : str = OSS(
params=_a ,optim=_a ,**_a ,)
else:
_a : Tuple = optimizer_cls(_a ,**_a )
if self.lr_scheduler is None:
_a : List[Any] = self._get_lr_scheduler(_a )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
def __lowercase ( self : List[Any] ,_a : List[Any] ):
'''simple docstring'''
_a : str = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_a : int = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_a : List[str] = schedule_func(self.optimizer ,num_warmup_steps=self.args.warmup_steps )
else:
_a : Optional[int] = schedule_func(
self.optimizer ,num_warmup_steps=self.args.warmup_steps ,num_training_steps=_a )
return scheduler
def __lowercase ( self : Tuple ):
'''simple docstring'''
if isinstance(self.train_dataset ,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size ,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) ,)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def __lowercase ( self : Dict ,_a : Dict ,_a : Any ,_a : Dict ):
'''simple docstring'''
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_a : List[Any] = model(**_a ,use_cache=_a )[0]
_a : Union[str, Any] = self.loss_fn(logits.view(-1 ,logits.shape[-1] ) ,labels.view(-1 ) )
else:
# compute usual loss via models
_a, _a : Union[str, Any] = model(**_a ,labels=_a ,use_cache=_a )[:2]
else:
# compute label smoothed loss
_a : List[Any] = model(**_a ,use_cache=_a )[0]
_a : Any = torch.nn.functional.log_softmax(_a ,dim=-1 )
_a, _a : List[str] = self.loss_fn(_a ,_a ,self.args.label_smoothing ,ignore_index=self.config.pad_token_id )
return loss, logits
def __lowercase ( self : Optional[int] ,_a : Union[str, Any] ,_a : List[Any] ):
'''simple docstring'''
_a : Optional[int] = inputs.pop('labels' )
_a, _a : int = self._compute_loss(_a ,_a ,_a )
return loss
def __lowercase ( self : Optional[Any] ,_a : nn.Module ,_a : Dict[str, Union[torch.Tensor, Any]] ,_a : bool ,_a : Optional[List[str]] = None ,):
'''simple docstring'''
_a : int = self._prepare_inputs(_a )
_a : Any = {
'max_length': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_a : int = self.model.generate(
inputs['input_ids'] ,attention_mask=inputs['attention_mask'] ,**_a ,)
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_a : int = self._pad_tensors_to_max_len(_a ,gen_kwargs['max_length'] )
_a : Union[str, Any] = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
_a, _a : Optional[int] = self._compute_loss(_a ,_a ,_a )
_a : Optional[Any] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_a : Optional[Any] = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_a : Dict = self._pad_tensors_to_max_len(_a ,gen_kwargs['max_length'] )
return (loss, logits, labels)
def __lowercase ( self : str ,_a : Tuple ,_a : Tuple ):
'''simple docstring'''
_a : List[Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
F""" padded to `max_length`={max_length}""" )
_a : int = pad_token_id * torch.ones(
(tensor.shape[0], max_length) ,dtype=tensor.dtype ,device=tensor.device )
_a : Union[str, Any] = tensor
return padded_tensor
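# A standalone sketch of the padding pattern in the `_pad_tensors_to_max_len`
# helper above: allocate a tensor pre-filled with the pad id, then copy the
# shorter sequences into its left-hand slice. (The function and variable names
# here are illustrative, not part of the trainer API.)
import torch

def pad_to_max_len(tensor: torch.Tensor, max_length: int, pad_token_id: int) -> torch.Tensor:
    padded = pad_token_id * torch.ones(
        (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
    )
    padded[:, : tensor.shape[-1]] = tensor
    return padded

batch = torch.tensor([[5, 6, 7], [8, 9, 10]])
print(pad_to_max_len(batch, max_length=5, pad_token_id=0))
# tensor([[ 5,  6,  7,  0,  0],
#         [ 8,  9, 10,  0,  0]])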
| 271 | 1 |
'''simple docstring'''
from __future__ import annotations
class lowerCamelCase_ :
def __init__( self : Optional[int] , _A : int=None ):
'''simple docstring'''
UpperCAmelCase__ : Any = data
UpperCAmelCase__ : str = None
def __repr__( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Any = []
UpperCAmelCase__ : Optional[int] = self
while temp:
string_rep.append(f"""{temp.data}""" )
UpperCAmelCase__ : Optional[int] = temp.next
return "->".join(lowercase_ )
def a__ ( lowerCAmelCase__ ) -> Tuple:
if not elements_list:
raise Exception('''The Elements List is empty''' )
UpperCAmelCase__ : str = Node(elements_list[0] )
for i in range(1 , len(UpperCAmelCase__ ) ):
UpperCAmelCase__ : List[str] = Node(elements_list[i] )
UpperCAmelCase__ : int = current.next
return head
def a__ ( lowerCAmelCase__ ) -> None:
if head_node is not None and isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
print_reverse(head_node.next )
print(head_node.data )
def a__ ( ) -> Union[str, Any]:
from doctest import testmod
testmod()
UpperCAmelCase__ : Any = make_linked_list([14, 52, 14, 12, 43] )
print('''Linked List:''' )
print(UpperCAmelCase__ )
print('''Elements in Reverse:''' )
print_reverse(UpperCAmelCase__ )
if __name__ == "__main__":
main()
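# For very long lists the recursive `print_reverse` above can exhaust Python's
# default recursion limit (about 1000 frames); an iterative variant with an
# explicit stack avoids that. A minimal sketch, assuming the same `.data` and
# `.next` node attributes used above:
def print_reverse_iterative(head_node) -> None:
    stack = []
    while head_node is not None:
        stack.append(head_node.data)
        head_node = head_node.next
    while stack:
        print(stack.pop())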
| 371 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
UpperCamelCase__ = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
UpperCamelCase__ = {
'''facebook/blenderbot_small-90M''': 5_1_2,
}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = BlenderbotSmallTokenizer
def __init__( self : List[Any] , _A : List[Any]=None , _A : Optional[Any]=None , _A : Optional[int]="<|endoftext|>" , _A : List[str]="<|endoftext|>" , _A : List[str]="<|endoftext|>" , _A : Any=False , _A : Union[str, Any]=True , **_A : Optional[int] , ):
'''simple docstring'''
super().__init__(
ByteLevelBPETokenizer(
vocab=_A , merges=_A , add_prefix_space=_A , trim_offsets=_A , ) , bos_token=_A , eos_token=_A , unk_token=_A , **_A , )
UpperCAmelCase__ : List[Any] = add_prefix_space
def lowercase_ ( self : str , _A : Any , _A : Any=None ):
'''simple docstring'''
UpperCAmelCase__ : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase_ ( self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = [self.sep_token_id]
UpperCAmelCase__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
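# The special-token layout implemented above, traced with placeholder ids
# (bos=0 and eos=2 are assumptions for illustration, not the real Blenderbot
# vocabulary):
bos_token_id, eos_token_id = 0, 2
token_ids_a = [10, 11, 12]
token_ids_b = [20, 21]
single = [bos_token_id] + token_ids_a + [eos_token_id]
pair = single + [eos_token_id] + token_ids_b + [eos_token_id]
print(single)  # [0, 10, 11, 12, 2]
print(pair)    # [0, 10, 11, 12, 2, 2, 20, 21, 2]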
| 299 | 0 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase ( enum.Enum ):
'''simple docstring'''
snake_case_ = 0
snake_case_ = 1
snake_case_ = 2
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self : List[str] ,*A : Optional[Any] ,**A : str ):
super().__init__(*A ,**A )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__A = None
if self.model.config.prefix is not None:
__A = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__A = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
__A , __A , __A = self._sanitize_parameters(prefix=A ,**self._forward_params )
__A = {**self._preprocess_params, **preprocess_params}
__A = {**self._forward_params, **forward_params}
def UpperCamelCase_ ( self : Union[str, Any] ,A : Union[str, Any]=None ,A : int=None ,A : Union[str, Any]=None ,A : Optional[Any]=None ,A : Tuple=None ,A : int=None ,A : str=None ,A : Dict=None ,**A : Optional[Any] ,):
__A = {}
if prefix is not None:
__A = prefix
if prefix:
__A = self.tokenizer(
A ,padding=A ,add_special_tokens=A ,return_tensors=self.framework )
__A = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
" [None, 'hole']" )
__A = handle_long_generation
preprocess_params.update(A )
__A = generate_kwargs
__A = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
__A = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
__A = ReturnType.TENSORS
if return_type is not None:
__A = return_type
if clean_up_tokenization_spaces is not None:
__A = clean_up_tokenization_spaces
if stop_sequence is not None:
__A = self.tokenizer.encode(A ,add_special_tokens=A )
if len(A ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
__A = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCamelCase_ ( self : Union[str, Any] ,*A : Any ,**A : int ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*A ,**A )
def __call__( self : List[str] ,A : int ,**A : Dict ):
return super().__call__(A ,**A )
def UpperCamelCase_ ( self : str ,A : Optional[int] ,A : Any="" ,A : Union[str, Any]=None ,**A : Any ):
__A = self.tokenizer(
prefix + prompt_text ,padding=A ,add_special_tokens=A ,return_tensors=self.framework )
__A = prompt_text
if handle_long_generation == "hole":
__A = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
__A = generate_kwargs["max_new_tokens"]
else:
__A = generate_kwargs.get("max_length" ,self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__A = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
__A = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
__A = inputs["attention_mask"][:, -keep_length:]
return inputs
def UpperCamelCase_ ( self : int ,A : str ,**A : List[Any] ):
__A = model_inputs["input_ids"]
__A = model_inputs.get("attention_mask" ,A )
# Allow empty prompts
if input_ids.shape[1] == 0:
__A = None
__A = None
__A = 1
else:
__A = input_ids.shape[0]
__A = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
__A = generate_kwargs.pop("prefix_length" ,0 )
if prefix_length > 0:
__A = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
__A = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
__A = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
__A = self.model.generate(input_ids=A ,attention_mask=A ,**A )
__A = generated_sequence.shape[0]
if self.framework == "pt":
__A = generated_sequence.reshape(A ,out_b // in_b ,*generated_sequence.shape[1:] )
elif self.framework == "tf":
__A = tf.reshape(A ,(in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCamelCase_ ( self : List[Any] ,A : Dict ,A : Optional[int]=ReturnType.FULL_TEXT ,A : Dict=True ):
__A = model_outputs["generated_sequence"][0]
__A = model_outputs["input_ids"]
__A = model_outputs["prompt_text"]
__A = generated_sequence.numpy().tolist()
__A = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
__A = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
__A = self.tokenizer.decode(
A ,skip_special_tokens=A ,clean_up_tokenization_spaces=A ,)
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
__A = 0
else:
__A = len(
self.tokenizer.decode(
input_ids[0] ,skip_special_tokens=A ,clean_up_tokenization_spaces=A ,) )
if return_type == ReturnType.FULL_TEXT:
__A = prompt_text + text[prompt_length:]
else:
__A = text[prompt_length:]
__A = {"generated_text": all_text}
records.append(A )
return records
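# A standalone sketch of the "hole" strategy used in the preprocessing above:
# keep only the rightmost slice of the prompt so that prompt plus newly
# generated tokens still fits in the model's context window. (Names here are
# illustrative.)
def truncate_for_generation(input_ids: list, max_new_tokens: int, model_max_length: int) -> list:
    keep_length = model_max_length - max_new_tokens
    if keep_length <= 0:
        raise ValueError("max_new_tokens alone exceeds the model context window")
    return input_ids[-keep_length:]

prompt = list(range(100))  # a 100-token prompt
truncated = truncate_for_generation(prompt, max_new_tokens=20, model_max_length=64)
print(len(truncated), truncated[0])  # 44 56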
| 15 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE :List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :List[str] = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE :Dict = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE :Optional[Any] = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] ,A : Optional[Any] ,A : Optional[int]=False ,A : int=False ,A : Union[str, Any]=False ,A : int=None ,A : Optional[Any]=None ,A : Union[str, Any]=None ,A : Optional[Any]=None ,A : Optional[Dict[str, Any]] = None ,**A : Tuple ,):
__A = {} if sp_model_kwargs is None else sp_model_kwargs
__A = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
__A = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__A = "<|endoftext|>" if eos_token is None else eos_token
__A = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__A = unk_token if pad_token is None else pad_token
__A = eos_token if bos_token is None else bos_token
else:
__A = "<pad>" if pad_token is None else pad_token
__A = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=A ,remove_space=A ,keep_accents=A ,bos_token=A ,eos_token=A ,unk_token=A ,pad_token=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,)
__A = do_lower_case
__A = remove_space
__A = keep_accents
__A = vocab_file
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
# Used for whitespace normalization in input texts
        # fmt: off
        __A = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
__A = re.compile(
            f'''[{"".join(map(chr ,list(range(0 ,9 ) ) + list(range(11 ,32 ) ) + list(range(1_27 ,1_60 ) ) + [1_60, 1_73, 82_03] ) )}]''' )
def __getstate__( self : Optional[int] ):
__A = self.__dict__.copy()
__A = None
return state
def __setstate__( self : Optional[Any] ,A : Union[str, Any] ):
__A = d
# for backward compatibility
if not hasattr(self ,"sp_model_kwargs" ):
__A = {}
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def UpperCamelCase_ ( self : List[str] ):
return len(self.sp_model )
def UpperCamelCase_ ( self : int ,A : str ):
__A = self.non_printing_characters_re.sub("" ,A )
# Normalize whitespaces
__A = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
__A = unicodedata.normalize("NFC" ,A )
return text
def UpperCamelCase_ ( self : Union[str, Any] ,A : str ,**A : Optional[int] ):
__A = self.preprocess_text(A )
return self.sp_model.encode(A ,out_type=A )
def UpperCamelCase_ ( self : Any ,A : str ):
return self.sp_model.PieceToId(A )
def UpperCamelCase_ ( self : Dict ,A : int ):
return self.sp_model.IdToPiece(A )
@staticmethod
def UpperCamelCase_ ( A : str ):
return out_string
def UpperCamelCase_ ( self : str ,A : List[str] ):
__A = []
__A = ""
__A = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A ) + token
__A = True
__A = []
else:
current_sub_tokens.append(A )
__A = False
out_string += self.sp_model.decode(A )
return out_string
def UpperCamelCase_ ( self : str ):
__A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase_ ( self : List[str] ,A : str ,A : Optional[str] = None ):
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__A = os.path.join(
A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,A )
elif not os.path.isfile(self.vocab_file ):
with open(A ,"wb" ) as fi:
__A = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
def UpperCamelCase_ ( self : Union[str, Any] ,A : Union[str, List[str]] ,A : Union[str, bool] = False ):
if isinstance(A ,A ):
__A = self.preprocess_text(A )
__A = self.sp_model.encode(A )
else:
__A = [self.preprocess_text(A ) for t in text]
__A = self.sp_model.encode(A )
if return_tensors is True or return_tensors == "pt":
__A = torch.tensor(A )
return token_ids
def UpperCamelCase_ ( self : List[Any] ,A : Union[int, List[int]] ):
return self.sp_model.decode(A )
def UpperCamelCase_ ( self : List[str] ,A : "Conversation" ):
__A = [f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
__A = (
f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(A ) + f'''{self.bos_token}Bot:'''
)
return self.encode(text=A )
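# A minimal sketch of the cleanup done by the text-preprocessing method above:
# strip non-printing control characters, collapse Unicode whitespace variants
# to a plain space, then apply NFC normalization.
import re
import unicodedata

_ctrl = "".join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160))))
non_printing_re = re.compile(f"[{_ctrl}]")

def normalize(text: str) -> str:
    text = non_printing_re.sub("", text)
    text = "".join(ch if not ch.isspace() else " " for ch in text)
    return unicodedata.normalize("NFC", text)

print(normalize("Cafe\u0301\u00a0au\u2009lait"))  # Café au lait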
| 15 | 1 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class __a ( __UpperCamelCase ):
__snake_case : Union[str, Any] = """gptj"""
__snake_case : int = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : List[str] , UpperCAmelCase : Optional[int]=5_04_00 , UpperCAmelCase : Optional[int]=20_48 , UpperCAmelCase : str=40_96 , UpperCAmelCase : Any=28 , UpperCAmelCase : Dict=16 , UpperCAmelCase : List[str]=64 , UpperCAmelCase : int=None , UpperCAmelCase : Union[str, Any]="gelu_new" , UpperCAmelCase : Tuple=0.0 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : str=0.0 , UpperCAmelCase : Optional[Any]=1e-5 , UpperCAmelCase : List[Any]=0.02 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Dict=5_02_56 , UpperCAmelCase : int=5_02_56 , UpperCAmelCase : Tuple=False , **UpperCAmelCase : Any , ):
lowerCAmelCase_ : Tuple = vocab_size
lowerCAmelCase_ : Union[str, Any] = n_positions
lowerCAmelCase_ : Union[str, Any] = n_embd
lowerCAmelCase_ : List[Any] = n_layer
lowerCAmelCase_ : List[Any] = n_head
lowerCAmelCase_ : Tuple = n_inner
lowerCAmelCase_ : Optional[Any] = rotary_dim
lowerCAmelCase_ : str = activation_function
lowerCAmelCase_ : str = resid_pdrop
lowerCAmelCase_ : List[Any] = embd_pdrop
lowerCAmelCase_ : Dict = attn_pdrop
lowerCAmelCase_ : Any = layer_norm_epsilon
lowerCAmelCase_ : Optional[int] = initializer_range
lowerCAmelCase_ : Optional[int] = use_cache
lowerCAmelCase_ : Optional[int] = bos_token_id
lowerCAmelCase_ : Any = eos_token_id
super().__init__(
bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , tie_word_embeddings=UpperCAmelCase , **UpperCAmelCase )
class __a ( __UpperCamelCase ):
def __init__( self : Any , UpperCAmelCase : PretrainedConfig , UpperCAmelCase : str = "default" , UpperCAmelCase : List[PatchingSpec] = None , UpperCAmelCase : bool = False , ):
super().__init__(UpperCAmelCase , task=UpperCAmelCase , patching_specs=UpperCAmelCase , use_past=UpperCAmelCase )
if not getattr(self._config , """pad_token_id""" , UpperCAmelCase ):
# TODO: how to do that better?
lowerCAmelCase_ : List[Any] = 0
@property
def A ( self : List[Any] ):
lowerCAmelCase_ : Optional[int] = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(UpperCAmelCase , direction="""inputs""" )
lowerCAmelCase_ : Any = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowerCAmelCase_ : List[Any] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def A ( self : Union[str, Any] ):
return self._config.n_layer
@property
def A ( self : Optional[Any] ):
return self._config.n_head
def A ( self : Optional[Any] , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : int = -1 , UpperCAmelCase : int = -1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[TensorType] = None , ):
lowerCAmelCase_ : Optional[Any] = super(UpperCAmelCase , self ).generate_dummy_inputs(
UpperCAmelCase , batch_size=UpperCAmelCase , seq_length=UpperCAmelCase , is_pair=UpperCAmelCase , framework=UpperCAmelCase )
# We need to order the input in the way they appears in the forward()
lowerCAmelCase_ : List[Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCAmelCase_ : int = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCAmelCase_ : Optional[Any] = seqlen + 2
lowerCAmelCase_ : Optional[int] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCAmelCase_ : Optional[int] = [
(torch.zeros(UpperCAmelCase ), torch.zeros(UpperCAmelCase )) for _ in range(self.num_layers )
]
lowerCAmelCase_ : Dict = common_inputs["""attention_mask"""]
if self.use_past:
lowerCAmelCase_ : Union[str, Any] = ordered_inputs["""attention_mask"""].dtype
lowerCAmelCase_ : str = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(UpperCAmelCase , UpperCAmelCase , dtype=UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def A ( self : Optional[int] ):
return 13
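# The dummy past_key_values built in `generate_dummy_inputs` above, shown
# standalone: one (key, value) pair of zeros per layer, each shaped
# (batch, num_heads, past_length, head_dim). Sizes use the GPT-J defaults
# from the config above.
import torch

batch, n_head, n_layer, n_embd, past_len = 2, 16, 28, 4096, 8
shape = (batch, n_head, past_len, n_embd // n_head)
past_key_values = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(n_layer)]
print(len(past_key_values), past_key_values[0][0].shape)  # 28 torch.Size([2, 16, 8, 256])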
| 360 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __a :
def __init__( self : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple=13 , UpperCAmelCase : Any=64 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : Any=3 , UpperCAmelCase : Any=True , UpperCAmelCase : str=True , UpperCAmelCase : str=32 , UpperCAmelCase : str=5 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : Dict=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : str=10 , UpperCAmelCase : Optional[Any]=0.02 , UpperCAmelCase : Optional[Any]=[1, 16, 4, 4] , UpperCAmelCase : Union[str, Any]=None , ):
lowerCAmelCase_ : Any = parent
lowerCAmelCase_ : str = batch_size
lowerCAmelCase_ : int = image_size
lowerCAmelCase_ : Tuple = patch_size
lowerCAmelCase_ : Union[str, Any] = num_channels
lowerCAmelCase_ : List[str] = is_training
lowerCAmelCase_ : List[str] = use_labels
lowerCAmelCase_ : str = hidden_size
lowerCAmelCase_ : Union[str, Any] = num_hidden_layers
lowerCAmelCase_ : Union[str, Any] = num_attention_heads
lowerCAmelCase_ : Any = intermediate_size
lowerCAmelCase_ : Dict = hidden_act
lowerCAmelCase_ : Dict = hidden_dropout_prob
lowerCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase_ : Optional[Any] = type_sequence_label_size
lowerCAmelCase_ : Optional[int] = initializer_range
lowerCAmelCase_ : int = scope
lowerCAmelCase_ : Tuple = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
lowerCAmelCase_ : int = (self.image_size // 32) ** 2
lowerCAmelCase_ : Dict = num_patches + 1
def A ( self : Any ):
lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : Optional[int] = None
if self.use_labels:
lowerCAmelCase_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def A ( self : Optional[Any] ):
lowerCAmelCase_ : List[Any] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [4, 8, 16, 32],
"""num_groups""": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=UpperCAmelCase , )
def A ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] ):
lowerCAmelCase_ : Tuple = ViTHybridModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : List[str] = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any ):
lowerCAmelCase_ : Tuple = self.type_sequence_label_size
lowerCAmelCase_ : Tuple = ViTHybridForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : int = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A ( self : str ):
lowerCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Tuple = config_and_inputs
lowerCAmelCase_ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
__snake_case : List[str] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
__snake_case : Dict = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
__snake_case : int = False
__snake_case : Tuple = False
__snake_case : Tuple = False
def A ( self : int ):
lowerCAmelCase_ : Union[str, Any] = ViTHybridModelTester(self )
lowerCAmelCase_ : str = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 )
def A ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def A ( self : Dict ):
pass
def A ( self : Dict ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Any = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase_ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def A ( self : List[str] ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : str = model_class(UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : List[str] = [*signature.parameters.keys()]
lowerCAmelCase_ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A ( self : str ):
lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A ( self : str ):
lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
def A ( self : Dict ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Union[str, Any] = _config_zero_init(UpperCAmelCase )
for model_class in self.all_model_classes:
lowerCAmelCase_ : Any = model_class(config=UpperCAmelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
lowerCAmelCase_ : Tuple = [F'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def A ( self : int ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Union[str, Any] = ViTHybridModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def __UpperCamelCase ( ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __a ( unittest.TestCase ):
@cached_property
def A ( self : int ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A ( self : Tuple ):
lowerCAmelCase_ : Union[str, Any] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
UpperCAmelCase )
lowerCAmelCase_ : Tuple = self.default_image_processor
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : Optional[int] = image_processor(images=UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase_ : Any = model(**UpperCAmelCase )
# verify the logits
lowerCAmelCase_ : Any = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
@slow
@require_accelerate
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Tuple = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" )
lowerCAmelCase_ : Optional[Any] = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" , device_map="""auto""" )
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : List[str] = image_processor(images=UpperCAmelCase , return_tensors="""pt""" )
lowerCAmelCase_ : Optional[Any] = model(**UpperCAmelCase )
lowerCAmelCase_ : List[str] = outputs.logits
# model predicts one of the 1000 ImageNet classes
lowerCAmelCase_ : List[str] = logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx] , """tabby, tabby cat""" )
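# The sequence-length arithmetic from the ViT-Hybrid model tester above: with
# the backbone's output stride of 32, an image of side S yields (S // 32) ** 2
# patch tokens, plus one [CLS] token.
image_size = 64
num_patches = (image_size // 32) ** 2
seq_length = num_patches + 1
print(num_patches, seq_length)  # 4 5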
| 28 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__lowercase = 250004
__lowercase = 250020
@require_sentencepiece
@require_tokenizers
class _A ( _a ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : Optional[Any] = MBartTokenizer
UpperCAmelCase : Dict = MBartTokenizerFast
UpperCAmelCase : Optional[Any] = True
UpperCAmelCase : str = True
def __snake_case ( self : List[Any]):
super().setUp()
# We have a SentencePiece fixture for testing
a : Any = MBartTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def __snake_case ( self : List[Any]):
a : str = MBartTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase)
a : Union[str, Any] = tokenizer.tokenize("This is a test")
self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
a : int = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
a : Optional[Any] = tokenizer.convert_tokens_to_ids(__UpperCAmelCase)
self.assertListEqual(
__UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
a : Tuple = tokenizer.convert_ids_to_tokens(__UpperCAmelCase)
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def __snake_case ( self : Any):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
a : Optional[Any] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
a : Dict = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase)
a : List[Any] = self.tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase)
a : Optional[int] = tempfile.mkdtemp()
a : Optional[int] = tokenizer_r.save_pretrained(__UpperCAmelCase)
a : List[Any] = tokenizer_p.save_pretrained(__UpperCAmelCase)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
a : Optional[int] = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
self.assertSequenceEqual(__UpperCAmelCase , __UpperCAmelCase)
# Checks everything loads correctly in the same way
a : Dict = tokenizer_r.from_pretrained(__UpperCAmelCase)
a : int = tokenizer_p.from_pretrained(__UpperCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCAmelCase , __UpperCAmelCase))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__UpperCAmelCase)
# Save tokenizer rust, legacy_format=True
a : Optional[Any] = tempfile.mkdtemp()
a : Tuple = tokenizer_r.save_pretrained(__UpperCAmelCase , legacy_format=__UpperCAmelCase)
a : List[str] = tokenizer_p.save_pretrained(__UpperCAmelCase)
# Checks it save with the same files
self.assertSequenceEqual(__UpperCAmelCase , __UpperCAmelCase)
# Checks everything loads correctly in the same way
a : Dict = tokenizer_r.from_pretrained(__UpperCAmelCase)
a : Optional[Any] = tokenizer_p.from_pretrained(__UpperCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCAmelCase , __UpperCAmelCase))
shutil.rmtree(__UpperCAmelCase)
# Save tokenizer rust, legacy_format=False
a : Dict = tempfile.mkdtemp()
a : List[str] = tokenizer_r.save_pretrained(__UpperCAmelCase , legacy_format=__UpperCAmelCase)
a : Optional[int] = tokenizer_p.save_pretrained(__UpperCAmelCase)
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
a : Any = tokenizer_r.from_pretrained(__UpperCAmelCase)
a : Any = tokenizer_p.from_pretrained(__UpperCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCAmelCase , __UpperCAmelCase))
shutil.rmtree(__UpperCAmelCase)
@require_torch
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : List[str] = """facebook/mbart-large-en-ro"""
UpperCAmelCase : Optional[Any] = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
UpperCAmelCase : str = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
UpperCAmelCase : str = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
@classmethod
def __snake_case ( cls : str):
a : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO")
a : Optional[int] = 1
return cls
def __snake_case ( self : Optional[Any]):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_XX"] , 250004)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020)
def __snake_case ( self : Union[str, Any]):
a : Union[str, Any] = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __UpperCAmelCase)
def __snake_case ( self : int):
self.assertIn(__UpperCAmelCase , self.tokenizer.all_special_ids)
a : Tuple = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
a : List[Any] = self.tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase)
a : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__UpperCAmelCase)
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase)
self.assertNotIn(self.tokenizer.eos_token , __UpperCAmelCase)
def __snake_case ( self : List[str]):
a : List[Any] = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , __UpperCAmelCase)
a : Tuple = 10
a : Dict = self.tokenizer(__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase).input_ids[0]
self.assertEqual(ids[-2] , 2)
self.assertEqual(ids[-1] , __UpperCAmelCase)
self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase)
def __snake_case ( self : List[str]):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]) , [250026, 250001])
def __snake_case ( self : str):
a : List[str] = tempfile.mkdtemp()
a : Optional[int] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__UpperCAmelCase)
a : Dict = MBartTokenizer.from_pretrained(__UpperCAmelCase)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __UpperCAmelCase)
@require_torch
def __snake_case ( self : Dict):
a : Union[str, Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__UpperCAmelCase , return_tensors="pt")
a : Tuple = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def __snake_case ( self : List[Any]):
a : List[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=len(self.expected_src_tokens) , return_tensors="pt" , )
a : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id)
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase)
self.assertEqual((2, 14) , batch.input_ids.shape)
self.assertEqual((2, 14) , batch.attention_mask.shape)
a : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __UpperCAmelCase)
self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE])
def __snake_case ( self : Optional[int]):
a : List[str] = self.tokenizer(self.src_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=3 , return_tensors="pt")
a : List[str] = self.tokenizer(
text_target=self.tgt_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=10 , return_tensors="pt")
a : str = targets["input_ids"]
a : Optional[Any] = shift_tokens_right(__UpperCAmelCase , self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 10)
@require_torch
def __snake_case ( self : Any):
a : Tuple = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR")
self.assertEqual(
nested_simplify(__UpperCAmelCase) , {
# A, test, EOS, en_XX
"input_ids": [[62, 3034, 2, 250004]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 250001,
} , )
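# A sketch consistent with the `shift_tokens_right` helper imported by the
# tests above: the target row ends with </s> followed by the language code,
# and the decoder input is that row rotated right by one, putting the
# language code in position 0. (Written independently here as an assumption
# about the imported helper, with made-up token ids.)
import torch

def shift_right(input_ids: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    prev_output_tokens = input_ids.clone()
    # index of the last non-pad token (the language id) in each row
    index_of_eos = (prev_output_tokens.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    decoder_start_tokens = prev_output_tokens.gather(1, index_of_eos).squeeze(-1)
    prev_output_tokens[:, 1:] = prev_output_tokens[:, :-1].clone()
    prev_output_tokens[:, 0] = decoder_start_tokens
    return prev_output_tokens

labels = torch.tensor([[884, 9019, 2, 250020]])  # tokens, </s>, ro_RO
print(shift_right(labels, pad_token_id=1))  # tensor([[250020,    884,   9019,      2]])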
| 40 |
"""simple docstring"""
class UpperCAmelCase_ :
def __init__( self , UpperCamelCase_ ) -> Tuple:
__lowercase : Any = n
__lowercase : Any = [None] * self.n
__lowercase : Optional[int] = 0 # index of the first element
__lowercase : Optional[int] = 0
__lowercase : Any = 0
def __len__( self ) -> int:
return self.size
def _lowerCamelCase ( self ) -> bool:
return self.size == 0
def _lowerCamelCase ( self ) -> Optional[Any]:
return False if self.is_empty() else self.array[self.front]
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Dict:
if self.size >= self.n:
raise Exception('''QUEUE IS FULL''' )
__lowercase : Any = data
__lowercase : List[Any] = (self.rear + 1) % self.n
self.size += 1
return self
def _lowerCamelCase ( self ) -> List[Any]:
if self.size == 0:
raise Exception('''UNDERFLOW''' )
__lowercase : Any = self.array[self.front]
__lowercase : int = None
__lowercase : Optional[int] = (self.front + 1) % self.n
self.size -= 1
return temp
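# A worked trace of the modular index arithmetic the circular queue above
# relies on (a trace of the bookkeeping, not a call into the class itself):
# with capacity n = 3, the slot freed by a dequeue is reused by the next
# enqueue.
n = 3
front = rear = size = 0
for _ in (1, 2, 3):        # three enqueues fill slots 0, 1, 2
    rear = (rear + 1) % n
    size += 1
front = (front + 1) % n    # one dequeue frees slot 0
size -= 1
rear = (rear + 1) % n      # the next enqueue wraps back to slot 0
size += 1
print(front, rear, size)   # 1 1 3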
| 249 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class __A ( unittest.TestCase ):
def __init__( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any]=13 , UpperCAmelCase_ : Tuple=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Union[str, Any]=99 , UpperCAmelCase_ : Optional[int]=32 , UpperCAmelCase_ : Dict=5 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Tuple=37 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Tuple=512 , UpperCAmelCase_ : Union[str, Any]=16 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : Optional[int]=4 , ):
lowerCAmelCase : str = parent
lowerCAmelCase : List[Any] = batch_size
lowerCAmelCase : Tuple = seq_length
lowerCAmelCase : Dict = is_training
lowerCAmelCase : str = use_attention_mask
lowerCAmelCase : Dict = use_token_type_ids
lowerCAmelCase : str = use_labels
lowerCAmelCase : str = vocab_size
lowerCAmelCase : Union[str, Any] = hidden_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : List[str] = intermediate_size
lowerCAmelCase : int = hidden_act
lowerCAmelCase : str = hidden_dropout_prob
lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase : List[str] = max_position_embeddings
lowerCAmelCase : Any = type_vocab_size
lowerCAmelCase : Dict = type_sequence_label_size
lowerCAmelCase : Optional[int] = initializer_range
lowerCAmelCase : Union[str, Any] = num_choices
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Tuple = None
if self.use_attention_mask:
lowerCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : int = None
if self.use_token_type_ids:
lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : Any = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowercase__ ( self : int ):
lowerCAmelCase : Any = self.prepare_config_and_inputs()
lowerCAmelCase : List[Any] = config_and_inputs
lowerCAmelCase : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class __A ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase_ : Dict = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : str = FlaxAlbertModelTester(self )
@slow
def lowercase__ ( self : List[Any] ):
for model_class_name in self.all_model_classes:
lowerCAmelCase : str = model_class_name.from_pretrained('albert-base-v2' )
lowerCAmelCase : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__lowercase )
@require_flax
class __A ( unittest.TestCase ):
@slow
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Tuple = FlaxAlbertModel.from_pretrained('albert-base-v2' )
lowerCAmelCase : List[Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowerCAmelCase : List[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowerCAmelCase : Dict = model(__lowercase , attention_mask=__lowercase )[0]
lowerCAmelCase : Optional[int] = (1, 11, 768)
self.assertEqual(output.shape , __lowercase )
lowerCAmelCase : List[str] = np.array(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowercase , atol=1E-4 ) )
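# The tolerance-check pattern from the integration test above, standalone:
# compare a slice of model output against reference values with an absolute
# tolerance. (The numbers below are made up for illustration.)
import jax.numpy as jnp

output = jnp.array([[0.65131, 1.50352], [0.65149, 1.50461]])
expected = jnp.array([[0.6513, 1.5035], [0.6515, 1.5046]])
print(bool(jnp.allclose(output, expected, atol=1e-4)))  # True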
| 367 |
__A : Dict = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
__A : List[Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
__A : Dict = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__A : Optional[int] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
__A : Optional[int] = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
__A : Tuple = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
__A : int = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
__A : Optional[Any] = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
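# The constant lists above are descending timestep schedules running from 999
# down to 0. An evenly spaced schedule of the same shape can be generated like
# this; a sketch of one plausible spacing rule, not the rule behind every list:
num_train_timesteps = 1000
num_inference_steps = 27
step = num_train_timesteps // num_inference_steps  # 37
schedule = list(range(num_train_timesteps - 1, -1, -step))
print(schedule[:4], schedule[-1], len(schedule))  # [999, 962, 925, 888] 0 28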
| 323 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """bloom"""
UpperCAmelCase_ : List[Any] = ["""past_key_values"""]
UpperCAmelCase_ : Optional[Any] = {
"""num_hidden_layers""": """n_layer""",
"""num_attention_heads""": """n_head""",
}
def __init__( self : Any , lowercase_ : Tuple=250_880 , lowercase_ : List[str]=64 , lowercase_ : List[str]=2 , lowercase_ : Tuple=8 , lowercase_ : List[str]=1E-5 , lowercase_ : List[str]=0.02 , lowercase_ : int=True , lowercase_ : Dict=1 , lowercase_ : List[Any]=2 , lowercase_ : List[str]=False , lowercase_ : Dict=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : int=1 , lowercase_ : Dict=False , **lowercase_ : Union[str, Any] , ) -> Optional[int]:
UpperCAmelCase : Optional[int] = vocab_size
# Backward compatibility with n_embed kwarg
UpperCAmelCase : Optional[int] = kwargs.pop('n_embed' , lowercase_ )
UpperCAmelCase : List[Any] = hidden_size if n_embed is None else n_embed
UpperCAmelCase : str = n_layer
UpperCAmelCase : Optional[Any] = n_head
UpperCAmelCase : Optional[Any] = layer_norm_epsilon
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : str = use_cache
UpperCAmelCase : Union[str, Any] = pretraining_tp
UpperCAmelCase : int = apply_residual_connection_post_layernorm
UpperCAmelCase : List[str] = hidden_dropout
UpperCAmelCase : List[Any] = attention_dropout
UpperCAmelCase : str = bos_token_id
UpperCAmelCase : Any = eos_token_id
UpperCAmelCase : Optional[int] = slow_but_exact
super().__init__(bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Dict = version.parse("""1.12""" )
def __init__( self : Any , lowercase_ : PretrainedConfig , lowercase_ : str = "default" , lowercase_ : List[PatchingSpec] = None , lowercase_ : bool = False , ) -> Union[str, Any]:
super().__init__(lowercase_ , task=lowercase_ , patching_specs=lowercase_ , use_past=lowercase_ )
if not getattr(self._config , 'pad_token_id' , lowercase_ ):
# TODO: how to do that better?
UpperCAmelCase : str = 0
@property
def UpperCAmelCase_ ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
UpperCAmelCase : Dict = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(lowercase_ , direction='inputs' , inverted_values_shape=lowercase_ )
UpperCAmelCase : Union[str, Any] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
UpperCAmelCase : str = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def UpperCAmelCase_ ( self : int ) -> int:
return self._config.n_layer
@property
def UpperCAmelCase_ ( self : Tuple ) -> int:
return self._config.n_head
@property
def UpperCAmelCase_ ( self : Dict ) -> float:
return 1E-3
def UpperCAmelCase_ ( self : Tuple , lowercase_ : "PreTrainedTokenizer" , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
UpperCAmelCase : List[Any] = super(lowercase_ , self ).generate_dummy_inputs(
lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
# We need to order the inputs in the way they appear in the forward()
UpperCAmelCase : Union[str, Any] = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
UpperCAmelCase , UpperCAmelCase : Optional[int] = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
UpperCAmelCase : List[Any] = seqlen + 2
UpperCAmelCase : int = self._config.hidden_size // self.num_attention_heads
UpperCAmelCase : Dict = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
UpperCAmelCase : Union[str, Any] = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
UpperCAmelCase : Optional[Any] = [
(torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) for _ in range(self.num_layers )
]
UpperCAmelCase : List[str] = common_inputs['attention_mask']
if self.use_past:
UpperCAmelCase : Union[str, Any] = ordered_inputs['attention_mask'].dtype
UpperCAmelCase : Optional[int] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(lowercase_ , lowercase_ , dtype=lowercase_ )] , dim=1 )
return ordered_inputs
@property
def UpperCAmelCase_ ( self : List[Any] ) -> int:
return 13
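# Hedged sketch (not part of the original file, illustrative sizes only): the
# dummy past_key_values built above use BLOOM's fused layout, where keys are
# (batch * num_heads, head_dim, past_len) -- the dynamic sequence axis at
# position 2 -- and values are (batch * num_heads, past_len, head_dim).
import torch
batch, num_heads, head_dim, past_len = 2, 8, 64, 10
past_key = torch.zeros(batch * num_heads, head_dim, past_len)    # (16, 64, 10)
past_value = torch.zeros(batch * num_heads, past_len, head_dim)  # (16, 10, 64)
assert past_key.shape == (16, 64, 10) and past_value.shape == (16, 10, 64)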
| 151 |
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    if len(array) < k or k < 0:
        raise ValueError('Invalid Input')
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
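# Worked example (added for clarity): for array = [1, 4, 2, 10, 2, 3, 1, 0, 20]
# and k = 4, the window sums are 17, 18, 17, 16, 6, 24, so the answer is 24.
assert max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4) == 24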
if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
    print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array, k)}")
| 151 | 1 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
_snake_case = get_logger(__name__)
_snake_case = r'''
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
            search or log softmax for each vocabulary token when using beam search
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.

'''
class _snake_case :
@add_start_docstrings(_a )
def __call__( self: Dict , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[Any] ) -> int:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class _snake_case :
@add_start_docstrings(_a )
def __call__( self: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] ) -> Optional[int]:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class _snake_case ( __SCREAMING_SNAKE_CASE ):
@add_start_docstrings(_a )
def __call__( self: Tuple , __lowerCamelCase: Dict , __lowerCamelCase: Any , __lowerCamelCase: Dict , **__lowerCamelCase: Dict ) -> List[Any]:
for processor in self:
__UpperCAmelCase : Union[str, Any] = inspect.signature(processor.__call__ ).parameters
if len(_a ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
f'''{processor.__class__} are passed to the logits processor.''' )
__UpperCAmelCase : Tuple = processor(_a , _a , _a , **_a )
else:
__UpperCAmelCase : List[str] = processor(_a , _a , _a )
return scores
class _snake_case ( __SCREAMING_SNAKE_CASE ):
def __init__( self: Tuple , __lowerCamelCase: Optional[Any] ) -> Optional[Any]:
if not isinstance(_a , _a ) or not (temperature > 0):
raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' )
__UpperCAmelCase : str = temperature
def __call__( self: Any , __lowerCamelCase: List[Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: str ) -> Dict:
__UpperCAmelCase : List[Any] = scores / self.temperature
return scores
class _snake_case ( __SCREAMING_SNAKE_CASE ):
def __init__( self: List[Any] , __lowerCamelCase: Any , __lowerCamelCase: int = -float("Inf" ) , __lowerCamelCase: Any = 1 ) -> int:
if not isinstance(_a , _a ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(_a , _a ) or (min_tokens_to_keep < 1):
raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
__UpperCAmelCase : Any = top_p
__UpperCAmelCase : str = filter_value
__UpperCAmelCase : List[str] = min_tokens_to_keep
def __call__( self: str , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: int ) -> str:
__UpperCAmelCase , __UpperCAmelCase : Dict = lax.top_k(_a , scores.shape[-1] )
__UpperCAmelCase : Optional[Any] = jnp.full_like(_a , self.filter_value )
__UpperCAmelCase : Optional[int] = jax.nn.softmax(_a , axis=-1 ).cumsum(axis=-1 )
__UpperCAmelCase : Union[str, Any] = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
__UpperCAmelCase : List[str] = jnp.roll(_a , 1 )
score_mask |= score_mask.at[:, 0].set(_a )
# min tokens to keep
__UpperCAmelCase : Any = score_mask.at[:, : self.min_tokens_to_keep].set(_a )
__UpperCAmelCase : List[str] = jnp.where(_a , _a , _a )
__UpperCAmelCase : Optional[int] = jax.lax.sort_key_val(_a , _a )[-1]
return next_scores
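# Minimal, self-contained sketch of the nucleus-filtering idea used above, on a
# single already-sorted row of illustrative logits (real inputs differ).
import jax
import jax.numpy as jnp
logits = jnp.array([2.0, 1.0, 0.5, -1.0])   # sorted high -> low
probs = jax.nn.softmax(logits)
mask = probs.cumsum() < 0.9                 # tokens strictly inside top-p
mask = jnp.roll(mask, 1).at[0].set(True)    # keep the token that crosses 0.9 too
filtered = jnp.where(mask, logits, -jnp.inf)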
class _snake_case ( __SCREAMING_SNAKE_CASE ):
def __init__( self: str , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any = -float("Inf" ) , __lowerCamelCase: List[str] = 1 ) -> Union[str, Any]:
if not isinstance(_a , _a ) or top_k <= 0:
raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
__UpperCAmelCase : Any = max(_a , _a )
__UpperCAmelCase : int = filter_value
def __call__( self: Optional[int] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = scores.shape
__UpperCAmelCase : Dict = jnp.full(batch_size * vocab_size , self.filter_value )
__UpperCAmelCase : Tuple = min(self.top_k , scores.shape[-1] ) # Safety check
__UpperCAmelCase , __UpperCAmelCase : str = lax.top_k(_a , _a )
__UpperCAmelCase : Optional[Any] = jnp.broadcast_to((jnp.arange(_a ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
__UpperCAmelCase : str = topk_scores.flatten()
__UpperCAmelCase : Union[str, Any] = topk_indices.flatten() + shift
__UpperCAmelCase : Optional[Any] = next_scores_flat.at[topk_indices_flat].set(_a )
__UpperCAmelCase : int = next_scores_flat.reshape(_a , _a )
return next_scores
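# Self-contained sketch of the flatten-and-scatter trick above (toy sizes):
# keep each row's top-k scores and write them back into a -inf-filled flat
# buffer, offsetting row r by r * vocab_size before scattering.
import jax.numpy as jnp
from jax import lax
scores = jnp.array([[0.1, 0.9, 0.5], [0.7, 0.2, 0.4]])  # (batch=2, vocab=3)
topk_vals, topk_idx = lax.top_k(scores, 2)
shift = (jnp.arange(2) * 3)[:, None]                     # row offsets 0 and 3
flat = jnp.full(scores.size, -jnp.inf)
flat = flat.at[(topk_idx + shift).flatten()].set(topk_vals.flatten())
filtered = flat.reshape(2, 3)  # [[-inf, 0.9, 0.5], [0.7, -inf, 0.4]]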
class _snake_case ( __SCREAMING_SNAKE_CASE ):
def __init__( self: List[str] , __lowerCamelCase: int ) -> Optional[int]:
__UpperCAmelCase : List[str] = bos_token_id
def __call__( self: str , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict ) -> List[str]:
__UpperCAmelCase : int = jnp.full(scores.shape , -float("inf" ) )
__UpperCAmelCase : Optional[int] = 1 - jnp.bool_(cur_len - 1 )
__UpperCAmelCase : int = jnp.where(_a , new_scores.at[:, self.bos_token_id].set(0 ) , _a )
return scores
class _snake_case ( __SCREAMING_SNAKE_CASE ):
def __init__( self: List[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[int] ) -> List[Any]:
__UpperCAmelCase : Tuple = max_length
__UpperCAmelCase : Dict = eos_token_id
def __call__( self: Tuple , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: int ) -> Union[str, Any]:
__UpperCAmelCase : Tuple = jnp.full(scores.shape , -float("inf" ) )
__UpperCAmelCase : Optional[Any] = 1 - jnp.bool_(cur_len - self.max_length + 1 )
__UpperCAmelCase : List[Any] = jnp.where(_a , new_scores.at[:, self.eos_token_id].set(0 ) , _a )
return scores
class _snake_case ( __SCREAMING_SNAKE_CASE ):
def __init__( self: Tuple , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[int] ) -> Dict:
if not isinstance(_a , _a ) or min_length < 0:
raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(_a , _a ) or eos_token_id < 0:
raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
__UpperCAmelCase : List[str] = min_length
__UpperCAmelCase : Dict = eos_token_id
def __call__( self: Optional[int] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Tuple ) -> Optional[Any]:
# create boolean flag to decide if min length penalty should be applied
__UpperCAmelCase : int = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
__UpperCAmelCase : Optional[int] = jnp.where(_a , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , _a )
return scores
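# Worked check of the flag above (added for clarity): with min_length = 5,
# cur_len = 3 gives 1 - clip(3 - 5, 0, 1) = 1 - 0 = 1 (EOS stays suppressed),
# while cur_len = 6 gives 1 - clip(6 - 5, 0, 1) = 1 - 1 = 0 (EOS allowed).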
class _snake_case ( __SCREAMING_SNAKE_CASE ):
def __init__( self: Any , __lowerCamelCase: Tuple , __lowerCamelCase: Any ) -> Optional[Any]:
__UpperCAmelCase : str = list(_a )
__UpperCAmelCase : Any = begin_index
def __call__( self: Optional[Any] , __lowerCamelCase: int , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[int] ) -> Tuple:
__UpperCAmelCase : Any = 1 - jnp.bool_(cur_len - self.begin_index )
__UpperCAmelCase : str = jnp.where(_a , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , _a )
return scores
class _snake_case ( __SCREAMING_SNAKE_CASE ):
def __init__( self: Optional[Any] , __lowerCamelCase: Optional[Any] ) -> Tuple:
__UpperCAmelCase : int = list(_a )
def __call__( self: Tuple , __lowerCamelCase: Dict , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: List[str] ) -> Dict:
__UpperCAmelCase : int = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class _snake_case ( __SCREAMING_SNAKE_CASE ):
def __init__( self: Dict , __lowerCamelCase: int ) -> Optional[int]:
__UpperCAmelCase : List[str] = dict(_a )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
__UpperCAmelCase : str = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.int32 ) * -1
for index, token in force_token_map.items():
if token is not None:
__UpperCAmelCase : Optional[Any] = force_token_array.at[index].set(_a )
__UpperCAmelCase : Union[str, Any] = jnp.int32(_a )
def __call__( self: str , __lowerCamelCase: str , __lowerCamelCase: Dict , __lowerCamelCase: Optional[Any] ) -> Optional[Any]:
def _force_token(__lowerCamelCase: Dict ):
__UpperCAmelCase : Optional[Any] = scores.shape[0]
__UpperCAmelCase : Dict = self.force_token_array[generation_idx]
__UpperCAmelCase : Union[str, Any] = jnp.ones_like(_a , dtype=scores.dtype ) * -float("inf" )
__UpperCAmelCase : List[str] = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
__UpperCAmelCase : Optional[Any] = lax.dynamic_update_slice(_a , _a , (0, current_token) )
return new_scores
__UpperCAmelCase : Any = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(_a ) , lambda: scores , ) , )
return scores
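# Minimal sketch of the XLA-friendly branching used above: `lax.cond` selects a
# branch as a traced primitive, so the whole call stays jittable (toy values).
import jax.numpy as jnp
from jax import lax
def pick(cur_len: int) -> jnp.ndarray:
    return lax.cond(cur_len >= 3, lambda: jnp.array(0.0), lambda: jnp.array(1.0))
assert float(pick(5)) == 0.0 and float(pick(1)) == 1.0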
class _snake_case ( __SCREAMING_SNAKE_CASE ):
def __init__( self: int , __lowerCamelCase: List[Any] , __lowerCamelCase: str , __lowerCamelCase: Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase : Optional[Any] = generate_config.eos_token_id
__UpperCAmelCase : int = generate_config.no_timestamps_token_id
__UpperCAmelCase : List[Any] = generate_config.no_timestamps_token_id + 1
__UpperCAmelCase : str = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(_a , "max_initial_timestamp_index" ):
__UpperCAmelCase : Union[str, Any] = generate_config.max_initial_timestamp_index
else:
__UpperCAmelCase : Dict = model_config.vocab_size
if self.max_initial_timestamp_index is None:
__UpperCAmelCase : int = model_config.vocab_size
def __call__( self: Dict , __lowerCamelCase: List[str] , __lowerCamelCase: str , __lowerCamelCase: Optional[int] ) -> Optional[int]:
# suppress <|notimestamps|> which is handled by without_timestamps
__UpperCAmelCase : List[Any] = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(__lowerCamelCase: Any , __lowerCamelCase: str ):
__UpperCAmelCase : Optional[int] = jnp.where((cur_len - self.begin_index) >= 1 , _a , _a )
__UpperCAmelCase : List[Any] = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _a , )
__UpperCAmelCase : List[Any] = jnp.where((cur_len - self.begin_index) < 2 , _a , _a )
__UpperCAmelCase : Optional[Any] = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , _a , _a , )
return jnp.where(
_a , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , _a , )
__UpperCAmelCase : int = jax.vmap(_a )(_a , _a )
__UpperCAmelCase : List[Any] = jnp.where(cur_len == self.begin_index , _a , _a )
__UpperCAmelCase : Optional[Any] = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _a , )
__UpperCAmelCase : Optional[int] = self.timestamp_begin + self.max_initial_timestamp_index
__UpperCAmelCase : List[Any] = jnp.where(
_a , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , _a , )
# if sum of probability over timestamps is above any other token, sample timestamp
__UpperCAmelCase : Any = jax.nn.log_softmax(_a , axis=-1 )
def handle_cumulative_probs(__lowerCamelCase: int , __lowerCamelCase: Union[str, Any] ):
__UpperCAmelCase : List[str] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
__UpperCAmelCase : int = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , _a , )
__UpperCAmelCase : Tuple = jax.vmap(_a )(_a , _a )
return scores
| 361 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
_snake_case = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
_snake_case = {'''facebook/blenderbot-3B''': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
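# Hedged check of the scheme above: 94 + 12 + 82 = 188 bytes are "printable"
# and keep their own character; the remaining 256 - 188 = 68 bytes are remapped
# past the byte range, to chr(256), chr(257), ...
printable = (
    list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
)
assert len(printable) == 188 and 256 - len(printable) == 68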
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class _snake_case ( _lowercase ):
lowerCamelCase__: str = VOCAB_FILES_NAMES
lowerCamelCase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__: Dict = ["input_ids", "attention_mask"]
def __init__( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]="replace" , __lowerCamelCase: List[str]="<s>" , __lowerCamelCase: List[str]="</s>" , __lowerCamelCase: str="</s>" , __lowerCamelCase: Tuple="<s>" , __lowerCamelCase: Optional[int]="<unk>" , __lowerCamelCase: Any="<pad>" , __lowerCamelCase: List[str]="<mask>" , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int , ) -> List[str]:
__UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
__UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
__UpperCAmelCase : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
__UpperCAmelCase : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
__UpperCAmelCase : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
__UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase : Dict = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
__UpperCAmelCase : List[Any] = json.load(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase : Dict = errors # how to handle errors in decoding
__UpperCAmelCase : Optional[int] = bytes_to_unicode()
__UpperCAmelCase : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle:
__UpperCAmelCase : List[Any] = merges_handle.read().split("\n" )[1:-1]
__UpperCAmelCase : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase : int = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
__UpperCAmelCase : List[Any] = {}
__UpperCAmelCase : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase : int = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _lowerCamelCase ( self: Dict ) -> Any:
return len(self.encoder )
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCamelCase ( self: int , __lowerCamelCase: List[Any] ) -> Union[str, Any]:
if token in self.cache:
return self.cache[token]
__UpperCAmelCase : List[Any] = tuple(__lowerCamelCase )
__UpperCAmelCase : Dict = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
__UpperCAmelCase : Optional[int] = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = bigram
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : str = 0
while i < len(__lowerCamelCase ):
try:
__UpperCAmelCase : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase : Union[str, Any] = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase : List[Any] = tuple(__lowerCamelCase )
__UpperCAmelCase : str = new_word
if len(__lowerCamelCase ) == 1:
break
else:
__UpperCAmelCase : Optional[Any] = get_pairs(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = " ".join(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = word
return word
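# Standalone sketch of the greedy merge loop above, with a hypothetical
# two-entry merge table (not Blenderbot's real merges): the lowest-ranked
# known pair is merged first, until no known pair remains.
def toy_bpe(token: str, ranks: dict) -> str:
    word = tuple(token)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        bigram = min(pairs, key=lambda pair: ranks.get(pair, float("inf")))
        if bigram not in ranks:
            break
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return " ".join(word)
assert toy_bpe("hug", {("h", "u"): 0, ("hu", "g"): 1}) == "hug"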
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[Any] ) -> Dict:
__UpperCAmelCase : Any = []
for token in re.findall(self.pat , __lowerCamelCase ):
__UpperCAmelCase : int = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
return bpe_tokens
def _lowerCamelCase ( self: int , __lowerCamelCase: str ) -> Dict:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[Any] ) -> List[str]:
return self.decoder.get(__lowerCamelCase )
def _lowerCamelCase ( self: Any , __lowerCamelCase: Any ) -> int:
__UpperCAmelCase : Dict = "".join(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : Dict = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
__UpperCAmelCase : Optional[Any] = 0
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
__UpperCAmelCase : Optional[Any] = token_index
writer.write(" ".join(__lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _lowerCamelCase ( self: Dict , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None , __lowerCamelCase: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase : int = [self.sep_token_id]
__UpperCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int ) -> List[Any]:
__UpperCAmelCase : Optional[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
__UpperCAmelCase : Optional[Any] = " " + text
return (text, kwargs)
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[str]:
return token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: "Conversation" ) -> List[int]:
__UpperCAmelCase : Tuple = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to add a space prefix, as is done within Blenderbot
inputs.append(" " + text )
else:
# Generated responses already contain the space prefix.
inputs.append(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = " ".join(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.encode(__lowerCamelCase )
if len(__lowerCamelCase ) > self.model_max_length:
__UpperCAmelCase : List[Any] = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
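# Worked check of the truncation above (illustrative values): keeping the tail
# of the ids preserves the most recent turns of the conversation.
model_max_length = 4
input_ids = [1, 2, 3, 4, 5, 6]
assert input_ids[-model_max_length:] == [3, 4, 5, 6]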
| 342 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = '''▁'''
_snake_case = {'''vocab_file''': '''sentencepiece.bpe.model'''}
_snake_case = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
),
}
}
_snake_case = {
'''facebook/nllb-200-distilled-600M''': 10_24,
}
# fmt: off
_snake_case = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : List[str] = VOCAB_FILES_NAMES
__A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Dict = PRETRAINED_VOCAB_FILES_MAP
__A : Union[str, Any] = ["input_ids", "attention_mask"]
__A : List[int] = []
__A : List[int] = []
def __init__( self , __A , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A=None , __A=None , __A=None , __A = None , __A=None , __A=False , **__A , ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
lowerCamelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCamelCase : Optional[Any] = legacy_behaviour
super().__init__(
bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , tokenizer_file=__A , src_lang=__A , tgt_lang=__A , additional_special_tokens=__A , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=__A , **__A , )
lowerCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__A ) )
lowerCamelCase : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCamelCase : int = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCamelCase : List[str] = 1
lowerCamelCase : Optional[Any] = len(self.sp_model )
lowerCamelCase : List[Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__A )
}
lowerCamelCase : int = {v: k for k, v in self.lang_code_to_id.items()}
lowerCamelCase : str = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowerCamelCase : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowerCamelCase : Any = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
lowerCamelCase : Union[str, Any] = src_lang if src_lang is not None else "eng_Latn"
lowerCamelCase : Union[str, Any] = self.lang_code_to_id[self._src_lang]
lowerCamelCase : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
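# Worked check of the fairseq<->spm alignment above (added for clarity): with
# fairseq_offset = 1, the first "real" spm piece (spm id 3, i.e. ",") lands at
# fairseq id 3 + 1 = 4, right after the pinned specials <s>/<pad>/</s>/<unk>.
fairseq_offset = 1
assert 3 + fairseq_offset == 4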
def __getstate__( self ):
"""simple docstring"""
lowerCamelCase : int = self.__dict__.copy()
lowerCamelCase : Any = None
lowerCamelCase : Tuple = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __A ):
"""simple docstring"""
lowerCamelCase : List[str] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase : Optional[Any] = {}
lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _snake_case ( self ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _snake_case ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : List[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _snake_case ( self , __A , __A = None , __A = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
lowerCamelCase : Optional[Any] = [1] * len(self.prefix_tokens )
lowerCamelCase : List[Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__A )) + suffix_ones
return prefix_ones + ([0] * len(__A )) + ([0] * len(__A )) + suffix_ones
def _snake_case ( self , __A , __A = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _snake_case ( self , __A , __A = None ):
"""simple docstring"""
lowerCamelCase : str = [self.sep_token_id]
lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , __A , __A , __A , __A , **__A ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCamelCase : Any = src_lang
lowerCamelCase : Optional[int] = self(__A , add_special_tokens=__A , return_tensors=__A , **__A )
lowerCamelCase : int = self.convert_tokens_to_ids(__A )
lowerCamelCase : List[str] = tgt_lang_id
return inputs
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , __A ):
"""simple docstring"""
return self.sp_model.encode(__A , out_type=__A )
def _snake_case ( self , __A ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase : str = self.sp_model.PieceToId(__A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self , __A ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : Tuple = "".join(__A ).replace(__A , " " ).strip()
return out_string
def _snake_case ( self , __A , __A = None ):
"""simple docstring"""
if not os.path.isdir(__A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase : Union[str, Any] = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , "wb" ) as fi:
lowerCamelCase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def _snake_case ( self , __A , __A = "eng_Latn" , __A = None , __A = "fra_Latn" , **__A , ):
"""simple docstring"""
lowerCamelCase : str = src_lang
lowerCamelCase : Tuple = tgt_lang
return super().prepare_seqaseq_batch(__A , __A , **__A )
def _snake_case ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _snake_case ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : Optional[int] = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
lowerCamelCase : Optional[int] = []
lowerCamelCase : Dict = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase : int = [self.cur_lang_code]
lowerCamelCase : Tuple = [self.eos_token_id]
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.lang_code_to_id[lang]
if self.legacy_behaviour:
lowerCamelCase : Union[str, Any] = []
lowerCamelCase : Tuple = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase : List[str] = [self.cur_lang_code]
lowerCamelCase : Optional[Any] = [self.eos_token_id]
| 283 |
def least_divisible_repunit(divisor: int) -> int:
    """Return the least k such that the repunit R(k) = (10**k - 1) / 9 is
    divisible by `divisor`, or 0 when no such k exists (i.e. when `divisor`
    shares a factor with 10)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    """Return the least n coprime to 10 whose repunit divisibility index
    A(n) exceeds `limit`. Since A(n) <= n, the search can safely start
    near `limit`."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
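# Worked check (added for clarity): R(6) = 111111 = 7 * 15873, and no shorter
# repunit is divisible by 7, so A(7) = 6.
assert least_divisible_repunit(7) == 6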
if __name__ == "__main__":
print(f'''{solution() = }''')
| 283 | 1 |
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = 'T5Config'
class a_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase_ = 'mt5'
UpperCAmelCase_ = MTaConfig
class a_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase_ = 'mt5'
UpperCAmelCase_ = MTaConfig
class a_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase_ = 'mt5'
UpperCAmelCase_ = MTaConfig
| 119 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase__ = 16
lowerCAmelCase__ = 32
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ = 1_6 ):
lowerCAmelCase__ = AutoTokenizer.from_pretrained('bert-base-cased' )
lowerCAmelCase__ = load_dataset('glue' , 'mrpc' )
def tokenize_function(lowerCAmelCase__ ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase__ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase__ = datasets.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase__ = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(lowerCAmelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase__ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase__ = 1_6
elif accelerator.mixed_precision != "no":
lowerCAmelCase__ = 8
else:
lowerCAmelCase__ = None
return tokenizer.pad(
lowerCAmelCase__ , padding='longest' , max_length=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_tensors='pt' , )
# Instantiate dataloaders.
lowerCAmelCase__ = DataLoader(
tokenized_datasets['train'] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
lowerCAmelCase__ = DataLoader(
tokenized_datasets['validation'] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase__ = mocked_dataloaders # noqa: F811
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , lowerCAmelCase__ ) == "1":
lowerCAmelCase__ = 2
# Initialize accelerator
lowerCAmelCase__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase__ = config['lr']
lowerCAmelCase__ = int(config['num_epochs'] )
lowerCAmelCase__ = int(config['seed'] )
lowerCAmelCase__ = int(config['batch_size'] )
lowerCAmelCase__ = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
lowerCAmelCase__ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCAmelCase__ = batch_size // MAX_GPU_BATCH_SIZE
lowerCAmelCase__ = MAX_GPU_BATCH_SIZE
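# Worked arithmetic for the rescaling above (illustrative numbers): a requested
# batch size of 64 with MAX_GPU_BATCH_SIZE = 16 becomes 64 // 16 = 4 gradient
# accumulation steps of 16 samples each, so the effective batch size stays
# 4 * 16 = 64.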
set_seed(lowerCAmelCase__ )
lowerCAmelCase__ , lowerCAmelCase__ = get_dataloaders(lowerCAmelCase__ , lowerCAmelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase__ = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=lowerCAmelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase__ = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase__ = AdamW(params=model.parameters() , lr=lowerCAmelCase__ )
# Instantiate scheduler
lowerCAmelCase__ = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowerCAmelCase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Now we train the model
for epoch in range(lowerCAmelCase__ ):
model.train()
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCAmelCase__ = model(**lowerCAmelCase__ )
lowerCAmelCase__ = outputs.loss
lowerCAmelCase__ = loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
lowerCAmelCase__ = 0
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase__ = model(**lowerCAmelCase__ )
lowerCAmelCase__ = outputs.logits.argmax(dim=-1 )
lowerCAmelCase__ , lowerCAmelCase__ = accelerator.gather((predictions, batch['labels']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(lowerCAmelCase__ ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
lowerCAmelCase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCAmelCase__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=lowerCAmelCase__ , references=lowerCAmelCase__ , )
lowerCAmelCase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , lowerCAmelCase__ )
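# Self-contained sketch of the truncation above (toy numbers): with 10 real
# eval samples gathered in chunks of 4, the last chunk carries 2 padding
# duplicates that must be dropped before computing metrics.
dataset_len, samples_seen = 10, 8
last_chunk = ["s8", "s9", "dup", "dup"]
kept = last_chunk[: dataset_len - samples_seen]
assert kept == ["s8", "s9"]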
def __lowerCamelCase ( ):
lowerCAmelCase__ = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = {'lr': 2e-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6}
training_function(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 119 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __A ( A ):
'''simple docstring'''
def __init__(self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=True , A=False , A=False , A=False , A=2 , A=99 , A=0 , A=32 , A=5 , A=4 , A=0.1 , A=0.1 , A=512 , A=12 , A=2 , A=0.02 , A=3 , A=4 , A="last" , A=None , A=None , ) -> Dict:
"""simple docstring"""
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_lengths
_a = use_token_type_ids
_a = use_labels
_a = gelu_activation
_a = sinusoidal_embeddings
_a = causal
_a = asm
_a = n_langs
_a = vocab_size
_a = n_special
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = summary_type
_a = use_proj
_a = scope
def a__ (self ) -> Tuple:
"""simple docstring"""
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_input_lengths:
_a = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , 2 ).float()
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def a__ (self ) -> int:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Dict:
"""simple docstring"""
_a = FlaubertModel(config=A )
model.to(A )
model.eval()
_a = model(A , lengths=A , langs=A )
_a = model(A , langs=A )
_a = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Union[str, Any]:
"""simple docstring"""
_a = FlaubertWithLMHeadModel(A )
model.to(A )
model.eval()
_a = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> List[Any]:
"""simple docstring"""
_a = FlaubertForQuestionAnsweringSimple(A )
model.to(A )
model.eval()
_a = model(A )
_a = model(A , start_positions=A , end_positions=A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Dict:
"""simple docstring"""
_a = FlaubertForQuestionAnswering(A )
model.to(A )
model.eval()
_a = model(A )
_a = model(
A , start_positions=A , end_positions=A , cls_index=A , is_impossible=A , p_mask=A , )
_a = model(
A , start_positions=A , end_positions=A , cls_index=A , is_impossible=A , )
((_a) , ) = result_with_labels.to_tuple()
_a = model(A , start_positions=A , end_positions=A )
((_a) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
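# Worked shape check for the beam-style QA head above (illustrative values):
# with start_n_top = 5 and end_n_top = 5, the end-position tensors hold
# 5 * 5 = 25 candidate scores per example, matching the assertions above.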
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Optional[Any]:
"""simple docstring"""
_a = FlaubertForSequenceClassification(A )
model.to(A )
model.eval()
_a = model(A )
_a = model(A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> str:
"""simple docstring"""
_a = self.num_labels
_a = FlaubertForTokenClassification(A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> List[Any]:
"""simple docstring"""
_a = self.num_choices
_a = FlaubertForMultipleChoice(config=A )
model.to(A )
model.eval()
_a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = model(
A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ (self ) -> int:
"""simple docstring"""
_a = self.prepare_config_and_inputs()
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) = config_and_inputs
_a = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __A ( A , A , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : List[Any] = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__lowerCamelCase : Tuple = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """simple docstring"""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        """simple docstring"""
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
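    # --- Hedged aside (not part of the test suite): the torch.jit round trip the
    # test above exercises, shown on a toy module; the file name is illustrative.
    #
    #   traced = torch.jit.trace(torch.nn.Linear(4, 2).eval(), torch.randn(1, 4))
    #   torch.jit.save(traced, "linear.pt")
    #   torch.jit.load("linear.pt")(torch.randn(1, 4))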
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@slow
    def test_inference_no_head_absolute_embedding(self):
        """simple docstring"""
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 211 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    '''simple docstring'''

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    '''simple docstring'''

    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
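# Hedged usage sketch (illustrative; assumes a local "my_audio/" directory laid
# out as my_audio/train/<label>/<clip>.wav once this builder is registered):
#
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="my_audio")
#   ds["train"][0]  # -> {"audio": {...}, "label": ...}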
| 211 | 1 |
A : List[str] = "Alexander Joslin"
import operator as op
from .stack import Stack
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
SCREAMING_SNAKE_CASE_ = Stack()
SCREAMING_SNAKE_CASE_ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(__UpperCamelCase ) )
elif i in operators:
# RULE 2
operator_stack.push(__UpperCamelCase )
elif i == ")":
# RULE 4
SCREAMING_SNAKE_CASE_ = operator_stack.peek()
operator_stack.pop()
SCREAMING_SNAKE_CASE_ = operand_stack.peek()
operand_stack.pop()
SCREAMING_SNAKE_CASE_ = operand_stack.peek()
operand_stack.pop()
SCREAMING_SNAKE_CASE_ = operators[opr](__UpperCamelCase , __UpperCamelCase )
operand_stack.push(__UpperCamelCase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
A : List[str] = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 305 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = ["DPTFeatureExtractor"]
A : str = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
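# How the deferral above works (short sketch, not transformers-specific docs):
# at import time only `_import_structure` (submodule -> exported names) is built,
# and `sys.modules[__name__]` is replaced by a `_LazyModule` proxy. The first
# attribute access (e.g. `DPTModel`) triggers the real import of `modeling_dpt`,
# keeping the top-level package import cheap.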
| 305 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 136 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        """simple docstring"""
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        """simple docstring"""
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        """simple docstring"""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data(self):
        """simple docstring"""
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        """simple docstring"""
        # We evaluate on the dev set, so map "test" onto it here.
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )
    def validation_step(self, batch, batch_idx):
        """simple docstring"""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        """simple docstring"""
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        """simple docstring"""
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        """simple docstring"""
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """simple docstring"""
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )

        return parser
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
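# Hedged CLI sketch. `--task`, `--max_seq_length`, `--gpus` and `--overwrite_cache`
# are defined above; `--data_dir`, `--output_dir` and `--do_predict` are consumed
# via `args.*` and assumed to come from `add_generic_args`/`BaseTransformer`:
#
#   python run_glue.py --task mrpc --data_dir ./glue/MRPC --gpus 1 --do_predict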
| 136 | 1 |
"""simple docstring"""
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    """simple docstring"""
    explored, stack = set(start), [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(depth_first_search(G, "A"))
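    # Expected result for the demo graph: every vertex is reachable from "A", so
    # the call prints the full set {"A", "B", "C", "D", "E", "F", "G"}
    # (set display order varies between runs).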
| 215 |
"""simple docstring"""
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
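# Worked example (mass-action law: n * p = n_i**2):
#   carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
#   -> ("intrinsic_conc", 50.0), since (25 * 100) ** 0.5 == 50.0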
| 215 | 1 |
'''simple docstring'''
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
def parse_roman_numerals(numerals: str) -> int:
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value
def generate_roman_numerals(num: int) -> str:
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
def a_ ( lowerCamelCase : str = "/p089_roman.txt" ):
lowerCAmelCase = 0
with open(os.path.dirname(lowerCamelCase ) + roman_numerals_filename ) as filea:
lowerCAmelCase = filea.readlines()
for line in lines:
lowerCAmelCase = line.strip()
lowerCAmelCase = parse_roman_numerals(lowerCamelCase )
lowerCAmelCase = generate_roman_numerals(lowerCamelCase )
savings += len(lowerCamelCase ) - len(lowerCamelCase )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
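# Worked example of the saving being measured: "IIIIIIIIIIIIIIII" (16 chars)
# parses to 16, which regenerates minimally as "XVI" (3 chars), a saving of 13.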
| 4 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    '''simple docstring'''
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    '''simple docstring'''
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
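# Rough sanity check on the printed count: randomized quicksort performs about
# 2 * n * ln(n) comparisons in expectation, i.e. roughly 920 for n = 100, so `z`
# should typically land in the high hundreds.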
| 222 | 0 |
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
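# Example (matches the classic problem statement): abbr("daBcd", "ABC") is True,
# since capitalizing 'a' and 'c' and deleting the remaining lowercase 'd's
# turns "daBcd" into "ABC".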
| 353 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

# Parameters (the paths are intentionally left blank; fill them in before running)
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple:
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:  # horizontal flip
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:  # vertical flip
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 241 | 0 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
) -> None:
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
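# Hedged CLI sketch using only the flags defined above (the two model identifiers
# are illustrative choices, not the only valid ones):
#
#   python consolidate_rag_checkpoint.py --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-consolidated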
| 326 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        '''simple docstring'''
        self.model_tester = FlaxBigBirdModelTester(self)
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        '''simple docstring'''
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        '''simple docstring'''
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        '''simple docstring'''
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        '''simple docstring'''
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        '''simple docstring'''
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        '''simple docstring'''
        # block sparse attention in FlaxBigBird returns no attention probs, so skip those comparisons
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
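    # --- Hedged aside on what `@jax.jit` does in the test above (toy sketch): ---
    #
    #   import jax, jax.numpy as jnp
    #   @jax.jit
    #   def double(x):
    #       return x * 2
    #   double(jnp.arange(3))  # traced/compiled once -> Array([0, 2, 4])
    #
    # Disabling it with `jax.disable_jit()` runs the same Python eagerly, which
    # is why the test compares output shapes between the two modes.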
| 326 | 1 |
'''simple docstring'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
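# Worked check of the closed form S_n = (n / 2) * (2a + (n - 1) * d):
#   sum_of_series(1, 1, 10) -> (10 / 2) * (2 * 1 + 9 * 1) = 5 * 11 = 55.0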
| 21 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 21 | 1 |
'''simple docstring'''
class Graph:  # Public class to implement a graph
    """simple docstring"""

    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        """simple docstring"""
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        """simple docstring"""
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        """simple docstring"""
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        """simple docstring"""
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
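# Hedged usage sketch (grid values are illustrative):
#
#   grid = [
#       [1, 1, 0, 0],
#       [0, 1, 0, 1],
#   ]
#   Graph(2, 4, grid).count_islands()  # -> 2: diagonals merge under 8-connectivity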
| 145 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """simple docstring"""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")


class VivitImageProcessor(BaseImageProcessor):
    '''simple docstring'''

    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        '''simple docstring'''
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        '''simple docstring'''
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        '''simple docstring'''
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
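    # Hedged usage sketch (relies on BaseImageProcessor dispatching __call__ to
    # `preprocess`; the dummy video below is an assumption for illustration):
    #
    #   import numpy as np
    #   processor = VivitImageProcessor()
    #   video = [np.zeros((300, 400, 3), dtype=np.uint8)] * 4  # 4 frames
    #   processor(video, return_tensors="np")["pixel_values"].shape
    #   # -> (1, 4, 3, 224, 224): batch, frames, channels, height, width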
| 91 | 0 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 350 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16_000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(
        self,
        audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def _process_audio(
        self,
        speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        is_target: bool = False,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
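    # Hedged usage sketch (dummy one-second waveform for illustration):
    #
    #   import numpy as np
    #   fe = SpeechT5FeatureExtractor()
    #   out = fe(audio=np.zeros(16_000, dtype=np.float32), sampling_rate=16_000)
    #   out["input_values"][0].shape   # -> (16000,): the raw waveform
    #   # pass `audio_target=` instead to get log-mel features via _extract_mel_features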
| 103 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            '''simple docstring'''
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class A__ ( unittest.TestCase ):
__UpperCamelCase : Optional[int] = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_a : Dict =ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Any ) -> Any:
'''simple docstring'''
_a : int =object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 )
self.assertGreater(len(SCREAMING_SNAKE_CASE ) , 0 )
for detected_object in outputs:
self.assertEqual(
SCREAMING_SNAKE_CASE , {
"""score""": ANY(SCREAMING_SNAKE_CASE ),
"""label""": ANY(SCREAMING_SNAKE_CASE ),
"""box""": {"""xmin""": ANY(SCREAMING_SNAKE_CASE ), """ymin""": ANY(SCREAMING_SNAKE_CASE ), """xmax""": ANY(SCREAMING_SNAKE_CASE ), """ymax""": ANY(SCREAMING_SNAKE_CASE )},
} , )
import datasets
_a : List[Any] =datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
_a : Optional[int] =[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
]
_a : Optional[int] =object_detector(SCREAMING_SNAKE_CASE , threshold=0.0 )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
for outputs in batch_outputs:
self.assertGreater(len(SCREAMING_SNAKE_CASE ) , 0 )
for detected_object in outputs:
self.assertEqual(
SCREAMING_SNAKE_CASE , {
"""score""": ANY(SCREAMING_SNAKE_CASE ),
"""label""": ANY(SCREAMING_SNAKE_CASE ),
"""box""": {"""xmin""": ANY(SCREAMING_SNAKE_CASE ), """ymin""": ANY(SCREAMING_SNAKE_CASE ), """xmax""": ANY(SCREAMING_SNAKE_CASE ), """ymax""": ANY(SCREAMING_SNAKE_CASE )},
} , )
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
def __UpperCAmelCase ( self :Tuple ) -> Any:
'''simple docstring'''
pass
@require_torch
def __UpperCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
_a : Optional[int] ="""hf-internal-testing/tiny-detr-mobilenetsv3"""
_a : List[str] =AutoModelForObjectDetection.from_pretrained(SCREAMING_SNAKE_CASE )
_a : int =AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE )
_a : List[Any] =ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE )
_a : Optional[Any] =object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}},
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}},
] , )
_a : Optional[int] =object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}},
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}},
],
[
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}},
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}},
],
] , )
@require_torch
@slow
def __UpperCAmelCase ( self :Any ) -> int:
'''simple docstring'''
_a : int ="""facebook/detr-resnet-50"""
_a : Any =AutoModelForObjectDetection.from_pretrained(SCREAMING_SNAKE_CASE )
_a : Dict =AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE )
_a : Dict =ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}},
] , )
_a : str =object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}},
],
[
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}},
],
] , )
@require_torch
@slow
def __UpperCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
_a : Dict ="""facebook/detr-resnet-50"""
_a : List[str] =pipeline("""object-detection""" , model=SCREAMING_SNAKE_CASE )
_a : Any =object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}},
] , )
_a : List[Any] =object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}},
],
[
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}},
],
] , )
@require_torch
@slow
def __UpperCAmelCase ( self :List[Any] ) -> Tuple:
'''simple docstring'''
_a : List[str] =0.9_985
_a : int ="""facebook/detr-resnet-50"""
_a : Optional[Any] =pipeline("""object-detection""" , model=SCREAMING_SNAKE_CASE )
_a : List[Any] =object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}},
] , )
@require_torch
@require_pytesseract
@slow
def __UpperCAmelCase ( self :Dict ) -> Optional[Any]:
'''simple docstring'''
_a : str ="""Narsil/layoutlmv3-finetuned-funsd"""
_a : str =0.9_993
_a : List[str] =pipeline("""object-detection""" , model=SCREAMING_SNAKE_CASE , threshold=SCREAMING_SNAKE_CASE )
_a : Optional[Any] =object_detector(
"""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"""score""": 0.9_993, """label""": """I-ANSWER""", """box""": {"""xmin""": 2_9_4, """ymin""": 2_5_4, """xmax""": 3_4_3, """ymax""": 2_6_4}},
{"""score""": 0.9_993, """label""": """I-ANSWER""", """box""": {"""xmin""": 2_9_4, """ymin""": 2_5_4, """xmax""": 3_4_3, """ymax""": 2_6_4}},
] , )
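# Outside the test harness, the same checkpoint can be driven in a few lines.
# A minimal sketch using the model name and threshold from the slow tests
# above:
#
#   from transformers import pipeline
#
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   preds = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9985)
#   for pred in preds:
#       print(pred["label"], round(pred["score"], 4), pred["box"])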
| 276 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
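# Going forward, the import path recommended by the deprecation message is:
#
#   from diffusers import StableDiffusionControlNetPipeline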
| 276 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A : List[Any] = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Optional[Any] = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
_A : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
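# With the lazy module in place, importing the package stays cheap and the
# torch-backed classes are only resolved on first attribute access. A short
# downstream usage sketch:
#
#   from transformers import LiltConfig, LiltModel
#
#   model = LiltModel(LiltConfig())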
| 265 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
_A : List[Any] = 'base_with_context'
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Tuple:
"""simple docstring"""
lowerCamelCase__ : Dict = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
lowerCamelCase__ : List[str] = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=UpperCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCamelCase__ : Any = weights[f"layers_{lyr_num}"]
lowerCamelCase__ : List[Any] = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
lowerCamelCase__ : int = ly_weight['''attention''']
lowerCamelCase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
lowerCamelCase__ : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
lowerCamelCase__ : str = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
lowerCamelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
lowerCamelCase__ : str = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
lowerCamelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
lowerCamelCase__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
lowerCamelCase__ : Any = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
lowerCamelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
lowerCamelCase__ : Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=UpperCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCamelCase__ : Tuple = weights[f"layers_{lyr_num}"]
lowerCamelCase__ : str = ly_weight['''attention''']
lowerCamelCase__ : Any = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
lowerCamelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
lowerCamelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
lowerCamelCase__ : int = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
lowerCamelCase__ : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
lowerCamelCase__ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
lowerCamelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
lowerCamelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
lowerCamelCase__ : Dict = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
lowerCamelCase__ : Any = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__ : Tuple = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
lowerCamelCase__ : int = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
lowerCamelCase__ : str = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=UpperCAmelCase )
lowerCamelCase__ : Tuple = nn.Parameter(
torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
lowerCamelCase__ : List[Any] = weights[f"layers_{lyr_num}"]
lowerCamelCase__ : Optional[Any] = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
lowerCamelCase__ : Any = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
lowerCamelCase__ : Optional[Any] = ly_weight['''self_attention''']
lowerCamelCase__ : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
lowerCamelCase__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
lowerCamelCase__ : Any = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
lowerCamelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
lowerCamelCase__ : Dict = ly_weight['''MultiHeadDotProductAttention_0''']
lowerCamelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
lowerCamelCase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
lowerCamelCase__ : Any = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
lowerCamelCase__ : str = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
lowerCamelCase__ : int = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
lowerCamelCase__ : str = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
lowerCamelCase__ : Any = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
lowerCamelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
lowerCamelCase__ : int = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
lowerCamelCase__ : List[str] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
lowerCamelCase__ : Any = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
lowerCamelCase__ : Tuple = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
return model
def _a ( UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__ : Tuple = checkpoints.load_tax_checkpoint(args.checkpoint_path )
lowerCamelCase__ : Optional[int] = jnp.tree_util.tree_map(onp.array , UpperCAmelCase )
lowerCamelCase__ : List[str] = [
'''from __gin__ import dynamic_registration''',
'''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''',
'''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''',
'''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''',
]
lowerCamelCase__ : List[Any] = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' )
lowerCamelCase__ : Optional[Any] = inference.parse_training_gin_file(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Tuple = inference.InferenceModel(args.checkpoint_path , UpperCAmelCase )
lowerCamelCase__ : int = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' )
lowerCamelCase__ : str = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['''inputs'''] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
lowerCamelCase__ : int = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
lowerCamelCase__ : Optional[int] = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
lowerCamelCase__ : Optional[int] = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , UpperCAmelCase )
lowerCamelCase__ : int = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , UpperCAmelCase )
lowerCamelCase__ : List[str] = load_decoder(ta_checkpoint['''target''']['''decoder'''] , UpperCAmelCase )
lowerCamelCase__ : List[str] = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' )
lowerCamelCase__ : List[Any] = SpectrogramDiffusionPipeline(
notes_encoder=UpperCAmelCase , continuous_encoder=UpperCAmelCase , decoder=UpperCAmelCase , scheduler=UpperCAmelCase , melgan=UpperCAmelCase , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
_A : int = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
_A : Tuple = parser.parse_args()
main(args)
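# Example invocation (the script name and paths are illustrative; the default
# --checkpoint_path resolves to f"{MODEL}/checkpoint_500000"):
#
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path base_with_context/checkpoint_500000 \
#       --output_path ./converted_pipeline
#
# Caveat: --save is declared with type=bool, and argparse turns any non-empty
# string (including "False") into True, so rely on the default rather than
# passing --save False.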
| 265 | 1 |
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
lowerCamelCase :Tuple = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
lowerCamelCase , lowerCamelCase :int = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
lowerCamelCase :Tuple = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
lowerCamelCase :Union[str, Any] = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
lowerCamelCase :str = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16,
# stream_logs=True)
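# Example: target a bring-your-own cluster and forward extra flags to the
# chosen script (the script name, host, key path and generation flags below
# are all illustrative):
#
#   python run_on_remote.py --user ubuntu --host 1.2.3.4 --key_path ~/.ssh/id_rsa \
#       --example pytorch/text-generation/run_generation.py \
#       --model_type gpt2 --model_name_or_path gpt2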
| 206 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Optional[Any] = 10
def _a (self ):
A_ : Dict = [1, 2, 3, 4]
A_ : List[Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(lowercase , self.block_size , 0 ) , lowercase )
def _a (self ):
A_ : List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
A_ : Tuple = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowercase , self.block_size , 0 ) , lowercase )
def _a (self ):
A_ : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
A_ : Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowercase , self.block_size , 0 ) , lowercase )
def _a (self ):
A_ : List[str] = """It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this."""
A_, A_ : Dict = process_story(lowercase )
self.assertEqual(lowercase , [] )
def _a (self ):
A_ : Optional[int] = """"""
A_, A_ : List[str] = process_story(lowercase )
self.assertEqual(lowercase , [] )
self.assertEqual(lowercase , [] )
def _a (self ):
A_ : Optional[Any] = (
"""It was the year of Our Lord one thousand seven hundred and """
"""seventy-five\n\nSpiritual revelations were conceded to England """
"""at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
)
A_, A_ : int = process_story(lowercase )
A_ : Optional[Any] = [
"""It was the year of Our Lord one thousand seven hundred and seventy-five.""",
"""Spiritual revelations were conceded to England at that favoured period, as at this.""",
]
self.assertEqual(lowercase , lowercase )
A_ : Dict = ["""It was the best of times."""]
self.assertEqual(lowercase , lowercase )
def _a (self ):
A_ : Optional[int] = torch.tensor([1, 2, 3, 4] )
A_ : Dict = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(lowercase , 0 ).numpy() , expected.numpy() )
def _a (self ):
A_ : str = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
A_ : str = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowercase , 23 ).numpy() , expected.numpy() )
def _a (self ):
A_ : Any = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
A_ : List[str] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowercase , 1 ).numpy() , expected.numpy() )
def _a (self ):
A_ : List[Any] = 101
A_ : List[Any] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
A_ : List[str] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
A_ : Dict = compute_token_type_ids(lowercase , lowercase )
np.testing.assert_array_equal(lowercase , lowercase )
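# The fixtures above pin down the helper semantics; equivalently, as a quick
# interactive check:
#
#   >>> truncate_or_pad([1, 2, 3, 4], 10, 0)
#   [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
#   >>> build_mask(torch.tensor([1, 2, 3, 4, 23, 23, 23]), 23)
#   tensor([1, 1, 1, 1, 0, 0, 0])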
| 206 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[str] = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class __A (snake_case__):
'''simple docstring'''
__lowercase: Dict = """big_bird"""
def __init__( self : str , UpperCAmelCase_ : Dict=50_358 , UpperCAmelCase_ : str=768 , UpperCAmelCase_ : str=12 , UpperCAmelCase_ : Optional[Any]=12 , UpperCAmelCase_ : str=3_072 , UpperCAmelCase_ : List[Any]="gelu_new" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=4_096 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Optional[int]=1E-12 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : int=0 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : str=66 , UpperCAmelCase_ : str="block_sparse" , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Union[str, Any]=64 , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Tuple=None , **UpperCAmelCase_ : str , ) ->Dict:
"""simple docstring"""
super().__init__(
pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , sep_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
snake_case_ = vocab_size
snake_case_ = max_position_embeddings
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = type_vocab_size
snake_case_ = layer_norm_eps
snake_case_ = use_cache
snake_case_ = rescale_embeddings
snake_case_ = attention_type
snake_case_ = use_bias
snake_case_ = block_size
snake_case_ = num_random_blocks
snake_case_ = classifier_dropout
class __A (snake_case__):
'''simple docstring'''
@property
def lowerCAmelCase ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
snake_case_ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case_ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
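# A quick instantiation sketch: the class above is published as
# `BigBirdConfig` (see the archive map), and the sparse-attention knobs are
# attention_type, block_size and num_random_blocks:
from transformers import BigBirdConfig

sparse_config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
print(sparse_config.attention_type)  # "block_sparse"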
| 361 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : int = {
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class __A (snake_case__):
'''simple docstring'''
__lowercase: List[str] = """canine"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : str=768 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : Optional[Any]=12 , UpperCAmelCase_ : Optional[Any]=3_072 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[str]=16_384 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Tuple=1E-12 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : int=0XE000 , UpperCAmelCase_ : Optional[int]=0XE001 , UpperCAmelCase_ : Dict=4 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : List[Any]=8 , UpperCAmelCase_ : Dict=16_384 , UpperCAmelCase_ : Optional[int]=128 , **UpperCAmelCase_ : Any , ) ->int:
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
snake_case_ = max_position_embeddings
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = type_vocab_size
snake_case_ = layer_norm_eps
# Character config:
snake_case_ = downsampling_rate
snake_case_ = upsampling_kernel_size
snake_case_ = num_hash_functions
snake_case_ = num_hash_buckets
snake_case_ = local_transformer_stride
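# Same pattern for CANINE; downsampling_rate and the hashing parameters are
# the character-level specific options:
from transformers import CanineConfig

canine_config = CanineConfig(downsampling_rate=4, num_hash_functions=8, num_hash_buckets=16_384)
print(canine_config.num_hash_buckets)  # 16384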
| 233 | 0 |
def binomial_coefficient(n, r):
    """Compute nCr by building Pascal's triangle row by row in O(r) space."""
    c = [0 for i in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # compute the current row from the previous row, updating right to
        # left so each c[j - 1] still holds the previous row's value
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
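# Cross-check against the standard library (math.comb, Python 3.8+):
from math import comb

assert binomial_coefficient(10, 5) == comb(10, 5) == 252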
| 314 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
a__: List[Any] = logging.getLogger()
def UpperCamelCase__( )->Union[str, Any]:
A__ = argparse.ArgumentParser()
parser.add_argument('''-f''' )
A__ = parser.parse_args()
return args.f
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
def UpperCamelCase ( self ):
A__ = logging.StreamHandler(sys.stdout )
logger.addHandler(__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase ):
A__ = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0,'''run_glue_deebert.py''' )
with patch.object(__lowerCamelCase,'''argv''',__lowerCamelCase ):
A__ = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(__lowerCamelCase,0.666 )
@slow
@require_torch_non_multi_gpu
def UpperCamelCase ( self ):
A__ = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
'''.split()
self.run_and_check(__lowerCamelCase )
A__ = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
self.run_and_check(__lowerCamelCase )
A__ = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
self.run_and_check(__lowerCamelCase )
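# These entropy/highway sweeps are normally driven through pytest with slow
# tests enabled (the path below is illustrative):
#
#   RUN_SLOW=1 pytest examples/research_projects/deebert/test_glue_deebert.py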
| 193 | 0 |
"""simple docstring"""
_lowerCAmelCase : int = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_lowerCAmelCase : Optional[int] = [{"type": "code", "content": INSTALL_CONTENT}]
_lowerCAmelCase : int = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 361 |
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
class UpperCAmelCase_ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE : str = ['pixel_values']
def __init__( self : List[Any] , A : bool = True , A : Dict[str, int] = None , A : PILImageResampling = PILImageResampling.BICUBIC , A : bool = True , A : Dict[str, int] = None , A : bool = True , A : Union[int, float] = 1 / 2_5_5 , A : bool = True , A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **A : List[Any] , ):
super().__init__(**A )
_UpperCAmelCase : int = size if size is not None else {"shortest_edge": 2_2_4}
_UpperCAmelCase : Tuple = get_size_dict(A , default_to_square=A )
_UpperCAmelCase : int = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
_UpperCAmelCase : Union[str, Any] = get_size_dict(A , param_name="crop_size" )
_UpperCAmelCase : str = do_resize
_UpperCAmelCase : Union[str, Any] = size
_UpperCAmelCase : Tuple = resample
_UpperCAmelCase : List[Any] = do_center_crop
_UpperCAmelCase : List[Any] = crop_size
_UpperCAmelCase : Tuple = do_rescale
_UpperCAmelCase : List[Any] = rescale_factor
_UpperCAmelCase : Dict = do_normalize
_UpperCAmelCase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_UpperCAmelCase : Any = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def snake_case_ ( self : Optional[int] , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BICUBIC , A : Optional[Union[str, ChannelDimension]] = None , **A : Union[str, Any] , ):
_UpperCAmelCase : Tuple = get_size_dict(A , default_to_square=A )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
_UpperCAmelCase : Any = int((2_5_6 / 2_2_4) * size["shortest_edge"] )
_UpperCAmelCase : Union[str, Any] = get_resize_output_image_size(A , size=A , default_to_square=A )
_UpperCAmelCase : str = {"height": output_size[0], "width": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}' )
return resize(
A , size=(size_dict["height"], size_dict["width"]) , resample=A , data_format=A , **A )
def snake_case_ ( self : int , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : Optional[Any] , ):
_UpperCAmelCase : Dict = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f'Size dict must have keys \'height\' and \'width\'. Got {size.keys()}' )
return center_crop(A , size=(size["height"], size["width"]) , data_format=A , **A )
def snake_case_ ( self : List[str] , A : np.ndarray , A : Union[int, float] , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict , ):
return rescale(A , scale=A , data_format=A , **A )
def snake_case_ ( self : List[str] , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : str , ):
return normalize(A , mean=A , std=A , data_format=A , **A )
def snake_case_ ( self : Tuple , A : ImageInput , A : Optional[bool] = None , A : Optional[Dict[str, int]] = None , A : PILImageResampling = None , A : Optional[bool] = None , A : Optional[Dict[str, int]] = None , A : Optional[bool] = None , A : Optional[float] = None , A : Optional[bool] = None , A : Optional[Union[float, Iterable[float]]] = None , A : Optional[Union[float, Iterable[float]]] = None , A : Optional[TensorType] = None , A : ChannelDimension = ChannelDimension.FIRST , **A : Dict , ):
_UpperCAmelCase : Dict = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase : List[str] = resample if resample is not None else self.resample
_UpperCAmelCase : int = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase : str = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase : List[str] = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase : Tuple = image_std if image_std is not None else self.image_std
_UpperCAmelCase : Any = size if size is not None else self.size
_UpperCAmelCase : Optional[int] = get_size_dict(A , default_to_square=A )
_UpperCAmelCase : List[str] = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase : List[str] = get_size_dict(A , param_name="crop_size" )
_UpperCAmelCase : List[Any] = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_UpperCAmelCase : Any = [to_numpy_array(A ) for image in images]
if do_resize:
_UpperCAmelCase : str = [self.resize(A , A , A ) for image in images]
if do_center_crop:
_UpperCAmelCase : Dict = [self.center_crop(A , A ) for image in images]
if do_rescale:
_UpperCAmelCase : Dict = [self.rescale(A , A ) for image in images]
if do_normalize:
_UpperCAmelCase : Optional[int] = [self.normalize(A , A , A ) for image in images]
_UpperCAmelCase : Union[str, Any] = [to_channel_dimension_format(A , A ) for image in images]
_UpperCAmelCase : List[Any] = {"pixel_values": images}
return BatchFeature(data=A , tensor_type=A )
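# End-to-end sketch. The resize step above follows the LeViT-style recipe:
# scale the short side to int(256 / 224 * shortest_edge) (256 with the
# defaults) before the 224x224 center crop. Assuming the processor is
# exported under a canonical name such as LevitImageProcessor, usage is:
#
#   import numpy as np
#   from transformers import LevitImageProcessor  # name assumed for illustration
#
#   image = np.random.randint(0, 256, size=(3, 300, 500), dtype=np.uint8)
#   batch = LevitImageProcessor()(image, return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 224, 224)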
| 202 | 0 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=5_1_2,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def __SCREAMING_SNAKE_CASE ( A_ ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f'could not parse string as bool {string}' )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
__UpperCamelCase : Optional[int] = parser.parse_args()
__UpperCamelCase : Tuple = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
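# Example invocation (checkpoint and YAML names are illustrative; the YAML
# must describe the architecture the checkpoint was trained with):
#
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path control_sd15_canny.pth \
#       --original_config_file cldm_v15.yaml \
#       --dump_path ./controlnet-canny \
#       --to_safetensors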
| 106 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__UpperCamelCase : List[str] = '''true'''
def __SCREAMING_SNAKE_CASE ( A_ , A_=82 , A_=16 ):
set_seed(42 )
lowerCAmelCase__ : Union[str, Any] = RegressionModel()
lowerCAmelCase__ : Optional[int] = deepcopy(A_ )
lowerCAmelCase__ : Any = RegressionDataset(length=A_ )
lowerCAmelCase__ : List[str] = DataLoader(A_ , batch_size=A_ )
model.to(accelerator.device )
lowerCAmelCase__ ,lowerCAmelCase__ : Dict = accelerator.prepare(A_ , A_ )
return model, ddp_model, dataloader
def __SCREAMING_SNAKE_CASE ( A_ , A_=False ):
lowerCAmelCase__ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
lowerCAmelCase__ : List[str] = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(A_ ):
lowerCAmelCase__ : Optional[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=A_ , max_length=A_ )
return outputs
with accelerator.main_process_first():
lowerCAmelCase__ : Dict = dataset.map(
A_ , batched=A_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
lowerCAmelCase__ : int = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(A_ ):
if use_longest:
return tokenizer.pad(A_ , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(A_ , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''' )
return DataLoader(A_ , shuffle=A_ , collate_fn=A_ , batch_size=16 )
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
lowerCAmelCase__ : Union[str, Any] = Accelerator(dispatch_batches=A_ , split_batches=A_ )
lowerCAmelCase__ : str = get_dataloader(A_ , not dispatch_batches )
lowerCAmelCase__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=A_ )
lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = accelerator.prepare(A_ , A_ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ):
lowerCAmelCase__ : Union[str, Any] = []
for batch in dataloader:
lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = batch.values()
with torch.no_grad():
lowerCAmelCase__ : List[str] = model(A_ )
lowerCAmelCase__ ,lowerCAmelCase__ : str = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
lowerCAmelCase__ ,lowerCAmelCase__ : int = [], []
for logit, targ in logits_and_targets:
logits.append(A_ )
targs.append(A_ )
lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = torch.cat(A_ ), torch.cat(A_ )
return logits, targs
def __SCREAMING_SNAKE_CASE ( A_ , A_=82 , A_=False , A_=False , A_=16 ):
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = get_basic_setup(A_ , A_ , A_ )
lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = generate_predictions(A_ , A_ , A_ )
assert (
len(A_ ) == num_samples
), f'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(A_ )}'
def __SCREAMING_SNAKE_CASE ( A_ = False , A_ = False ):
lowerCAmelCase__ : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' )
lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = get_mrpc_setup(A_ , A_ )
# First do baseline
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Any = setup['''no''']
model.to(A_ )
model.eval()
for batch in dataloader:
batch.to(A_ )
with torch.inference_mode():
lowerCAmelCase__ : Optional[int] = model(**A_ )
lowerCAmelCase__ : Dict = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=A_ , references=batch['''labels'''] )
lowerCAmelCase__ : Dict = metric.compute()
# Then do distributed
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
lowerCAmelCase__ : Union[str, Any] = model(**A_ )
lowerCAmelCase__ : int = outputs.logits.argmax(dim=-1 )
lowerCAmelCase__ : int = batch['''labels''']
lowerCAmelCase__ ,lowerCAmelCase__ : int = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=A_ , references=A_ )
lowerCAmelCase__ : List[Any] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'
def __SCREAMING_SNAKE_CASE ( ):
lowerCAmelCase__ : List[str] = Accelerator(split_batches=A_ , dispatch_batches=A_ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' )
test_mrpc(A_ , A_ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
lowerCAmelCase__ : Optional[Any] = Accelerator(split_batches=A_ , dispatch_batches=A_ )
if accelerator.is_local_main_process:
print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' )
test_torch_metrics(A_ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
lowerCAmelCase__ : List[str] = Accelerator()
test_torch_metrics(A_ , 5_12 )
accelerator.state._reset_state()
def __SCREAMING_SNAKE_CASE ( A_ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
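# The gather checks above only exercise multiple processes when launched
# through the accelerate CLI, e.g. (illustrative):
#
#   accelerate launch --num_processes 2 test_metrics.py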
| 106 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Optional[int] = "roformer"
def __init__( self: Optional[int] ,lowerCamelCase_: Tuple=50000 ,lowerCamelCase_: Optional[int]=None ,lowerCamelCase_: List[Any]=768 ,lowerCamelCase_: List[Any]=12 ,lowerCamelCase_: Optional[int]=12 ,lowerCamelCase_: Optional[Any]=3072 ,lowerCamelCase_: int="gelu" ,lowerCamelCase_: str=0.1 ,lowerCamelCase_: Union[str, Any]=0.1 ,lowerCamelCase_: Any=1536 ,lowerCamelCase_: str=2 ,lowerCamelCase_: Optional[int]=0.0_2 ,lowerCamelCase_: int=1e-12 ,lowerCamelCase_: Optional[int]=0 ,lowerCamelCase_: Any=False ,lowerCamelCase_: Union[str, Any]=True ,**lowerCamelCase_: List[str] ,) -> Tuple:
super().__init__(pad_token_id=lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : Optional[int] = hidden_size if embedding_size is None else embedding_size
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : Any = num_attention_heads
UpperCAmelCase_ : Optional[Any] = hidden_act
UpperCAmelCase_ : Any = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : Optional[Any] = type_vocab_size
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : Optional[int] = layer_norm_eps
UpperCAmelCase_ : Optional[Any] = rotary_value
UpperCAmelCase_ : str = use_cache
class _snake_case ( __snake_case ):
'''simple docstring'''
@property
def A__ ( self: Tuple ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase_ : Tuple = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
UpperCAmelCase_ : Optional[Any] = {0: """batch""", 1: """sequence"""}
UpperCAmelCase_ : Any = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
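# Instantiation sketch; rotary_value controls whether rotary position
# embeddings are also applied to the value projections, and embedding_size
# falls back to hidden_size when left unset:
from transformers import RoFormerConfig

roformer_config = RoFormerConfig(rotary_value=True)
print(roformer_config.embedding_size)  # 768, inherited from hidden_size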
| 59 |
def solution(length: int = 50) -> int:
    """Count the tilings of a row of `length` unit squares using grey unit
    squares together with coloured tiles of length two, three or four."""
    # ways_number[k] starts at 1: the all-grey tiling of a row of length k
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                # leftmost coloured tile starts at `tile_start`; everything
                # before it is grey, and the remainder is tiled recursively
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
if __name__ == "__main__":
print(F"{solution() = }")
import unittest

import numpy as np
import requests

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor


class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # In VQA mode, calling without a header text must fail
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (the alpha channel is dropped by do_convert_rgb)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
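
# Note (a hedged reading of the assertions above): every flattened patch carries
# patch_height * patch_width * num_channels pixel values plus two extra entries
# for the patch's (row, column) position, which is where the trailing `+ 2` in
# `expected_hidden_dim` comes from.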
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
# fmt: on
class NllbTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for NLLB-200, with fairseq-aligned special tokens."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline, to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source-language setting.
        - In legacy mode: no prefix, suffix = [eos, src_lang_code].
        - In default mode: prefix = [src_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target-language setting.
        - In legacy mode: no prefix, suffix = [eos, tgt_lang_code].
        - In default mode: prefix = [tgt_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
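
# Usage sketch (hedged; checkpoint name taken from the map above):
#
#     tokenizer = NllbTokenizer.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     batch = tokenizer("Hello world", return_tensors="pt")
#     # In default (non-legacy) mode, `input_ids` start with the eng_Latn
#     # language code and end with the </s> token.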
def solution(limit: int = 28123) -> int:
    """
    Sum all positive integers up to `limit` that cannot be written as the sum
    of two abundant numbers (Project Euler problem 23).
    """
    # divisor-sum sieve: sum_divs[n] holds the sum of the proper divisors of n
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
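
# Design note (hedged): because `sum_divs` is precomputed as a sieve, each
# abundance check (`sum_divs[n] > n`) is O(1); for reference, 12 is the smallest
# abundant number, making 24 = 12 + 12 the smallest sum of two abundant numbers.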
"""simple docstring"""
from itertools import count
def lowerCamelCase_ (UpperCamelCase__ : int = 50 ):
_UpperCAmelCase : Tuple = [1] * min_block_length
for n in count(UpperCamelCase__ ):
fill_count_functions.append(1 )
for block_length in range(UpperCamelCase__ , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 100_0000:
break
return n
if __name__ == "__main__":
print(f"{solution() = }")
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
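
# Usage sketch (hedged): in constrained beam search, a DisjunctiveConstraint is
# typically passed to generation as
#
#     model.generate(input_ids, constraints=[DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])])
#
# meaning "at least one of these token sequences must appear in the output".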
import warnings
from typing import Dict

import numpy as np

from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated; if you want a similar functionality use `top_k=None` instead"
                " of `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and returns a list when a single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
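
# Usage sketch (hedged): this class is normally reached through the `pipeline`
# factory rather than instantiated directly, e.g.
#
#     classifier = pipeline("text-classification")
#     classifier("I love this!")              # legacy path -> [{"label": ..., "score": ...}]
#     classifier("I love this!", top_k=None)  # non-legacy path: scores for every label
#
# Numeric sanity check for the helpers above: sigmoid(0.0) == 0.5 and
# softmax([1.0, 2.0]) is roughly [0.269, 0.731].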
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    """
    Return the denominator, in lowest terms, of the product of all curious
    two-digit "digit cancelling" fractions (Project Euler problem 33).
    """
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
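
# Worked example (hedged; from the Project Euler 33 statement): 49/98 = 4/8 is
# one of the four non-trivial curious fractions found by `fraction_list(2)`,
# and `solution(2)` is the denominator of their product in lowest terms.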
import unittest

import numpy as np

from transformers import is_flax_available
from transformers.testing_utils import require_flax

from ..test_modeling_flax_common import ids_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.generation import (
        FlaxForcedBOSTokenLogitsProcessor,
        FlaxForcedEOSTokenLogitsProcessor,
        FlaxLogitsProcessorList,
        FlaxMinLengthLogitsProcessor,
        FlaxTemperatureLogitsWarper,
        FlaxTopKLogitsWarper,
        FlaxTopPLogitsWarper,
    )


@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
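
# Design note (hedged): each warper/processor is a pure function of
# (input_ids, scores, cur_len), so FlaxLogitsProcessorList can simply chain
# them and the whole chain stays `jax.jit`-compatible, which is exactly what
# the two tests above verify.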
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
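
# Behavior sketch (a hedged reading of the fixtures above): `_convert_nargs_to_dict`
# turns the argv-style list into {"model_name_or_path": "bert", "do_train": False,
# "epochs": 3, "learning_rate": 5e-5, "max_steps": 50.5}, casting each value to
# bool/int/float where possible; the failing fixture mixes bare flags with valued
# ones, which is the ambiguity the ValueError guards against.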
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path

import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset

from transformers import AutoTokenizer, HfArgumentParser


PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get the MD5 hash of the whitespace-stripped content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculate mean and max line length of the file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate the fraction of alphanumeric characters."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if the current hash is still in the set of unique hashes and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if the file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if the file is a configuration file or a unit test by:
    1. looking for keywords in the first few lines of the file;
    2. counting occurrences of the words 'config' and 'test' relative to the number of lines.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a Python file contains none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if the file uses the '=' symbol fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute the character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function so the cache is filled only once."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter the dataset with heuristics: keep only unique, human-written, code-like files."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with gzip and remove the original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)


# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as an artifact
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402


SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        # don't wait on the remote-code prompt in tests (a hedged reconstruction
        # of the obfuscated assignment on this line)
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))

            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        # (The original source is truncated here.)
        with self.assertRaises(ValueError):
__UpperCAmelCase : int = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase_ ):
__UpperCAmelCase : Any = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=UpperCAmelCase_ )
__UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(UpperCAmelCase_ , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = '''new-model'''
try:
AutoConfig.register("new-model" , UpperCAmelCase_ )
# If remote code is not set, the default is to use local
__UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
__UpperCAmelCase : int = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote is enabled, we load from the Hub
__UpperCAmelCase : Tuple = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
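Outside the test harness, the registration flow exercised above reduces to a short sketch; MyConfig and its model_type are made-up stand-ins for any PretrainedConfig subclass:

from transformers import AutoConfig, PretrainedConfig

class MyConfig(PretrainedConfig):
    model_type = "my-model"  # must be unique, or AutoConfig.register raises

AutoConfig.register("my-model", MyConfig)
config = AutoConfig.for_model("my-model")  # now resolves through the auto-API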
| 37 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ :Tuple = {'''configuration_sew''': ['''SEW_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SEWConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ :List[str] = [
'''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SEWForCTC''',
'''SEWForSequenceClassification''',
'''SEWModel''',
'''SEWPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
A_ :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
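For context, the _LazyModule indirection above keeps a plain package import cheap: modeling_sew (and its torch dependency) loads only when one of its names is first accessed. A minimal sketch, assuming the standard transformers top-level re-exports:

from transformers import SEWConfig  # resolved lazily; only the config module loads

config = SEWConfig()  # default SEW hyperparameters
# Accessing SEWModel would trigger the torch-backed modeling_sew import:
# from transformers import SEWModel; model = SEWModel(config)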
| 71 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class __A ( a ):
"""simple docstring"""
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : str =tempfile.mkdtemp()
__UpperCamelCase : Optional[int] =8
# DPR tok
__UpperCamelCase : str =[
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__UpperCamelCase : Optional[Any] =os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
__UpperCamelCase : Dict =os.path.join(lowerCamelCase__ , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
__UpperCamelCase : Optional[int] =[
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__UpperCamelCase : str =dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
__UpperCamelCase : Optional[int] =['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__UpperCamelCase : Any ={'unk_token': '<unk>'}
__UpperCamelCase : Any =os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
__UpperCamelCase : Any =os.path.join(lowerCamelCase__ , BART_VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase : Dict =os.path.join(lowerCamelCase__ , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCamelCase__ ) )
def __lowercase ( self ):
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def __lowercase ( self ):
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def __lowercase ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =os.path.join(self.tmpdirname , 'rag_tokenizer' )
__UpperCamelCase : Dict =RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
__UpperCamelCase : List[Any] =RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(lowerCamelCase__ )
rag_tokenizer.save_pretrained(lowerCamelCase__ )
__UpperCamelCase : int =RagTokenizer.from_pretrained(lowerCamelCase__ , config=lowerCamelCase__ )
self.assertIsInstance(new_rag_tokenizer.question_encoder , lowerCamelCase__ )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , lowerCamelCase__ )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Dict =RagTokenizer.from_pretrained('facebook/rag-token-nq' )
__UpperCamelCase : Union[str, Any] =[
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
__UpperCamelCase : int =tokenizer(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@slow
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =RagTokenizer.from_pretrained('facebook/rag-sequence-nq' )
__UpperCamelCase : Union[str, Any] =[
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
__UpperCamelCase : Any =tokenizer(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
| 71 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase ( __UpperCamelCase ):
lowercase : List[str] = ["""image_processor""", """tokenizer"""]
lowercase : Any = """LayoutLMv2ImageProcessor"""
lowercase : Optional[Any] = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ):
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , SCREAMING_SNAKE_CASE_ , )
UpperCamelCase : List[str] = kwargs.pop("""feature_extractor""" )
UpperCamelCase : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes """
"""if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("""You cannot return overflowing tokens without returning the offsets mapping.""" )
# first, apply the image processor
UpperCamelCase : List[Any] = self.image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCamelCase : Any = features["""words"""]
UpperCamelCase : str = self.tokenizer(
text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_overflowing_tokens=SCREAMING_SNAKE_CASE_ , return_special_tokens_mask=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , return_length=SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# add pixel values
UpperCamelCase : List[Any] = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
UpperCamelCase : Optional[Any] = self.get_overflowing_images(SCREAMING_SNAKE_CASE_ , encoded_inputs["""overflow_to_sample_mapping"""] )
UpperCamelCase : List[str] = images
return encoded_inputs
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
UpperCamelCase : List[str] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
f' {len(SCREAMING_SNAKE_CASE_ )} and {len(SCREAMING_SNAKE_CASE_ )}' )
return images_with_overflow
def a_ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def a_ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def a_ ( self ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def a_ ( self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , SCREAMING_SNAKE_CASE_ , )
return self.image_processor_class
@property
def a_ ( self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , SCREAMING_SNAKE_CASE_ , )
return self.image_processor
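A minimal inference sketch for the processor above. The checkpoint id is the public LayoutXLM base model, the image path is a placeholder, and with the default apply_ocr=True the words and boxes come from Tesseract, so pytesseract must be installed:

from PIL import Image
from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
image = Image.open("document.png").convert("RGB")  # placeholder document scan
encoding = processor(image, return_tensors="pt")
print(encoding.keys())  # input_ids, bbox, attention_mask, image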
| 355 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase ( _UpperCAmelCase , unittest.TestCase ):
lowercase : Any = AudioLDMPipeline
lowercase : Union[str, Any] = TEXT_TO_AUDIO_PARAMS
lowercase : List[str] = TEXT_TO_AUDIO_BATCH_PARAMS
lowercase : Tuple = frozenset(
[
'num_inference_steps',
'num_waveforms_per_prompt',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
] )
def a_ ( self ):
torch.manual_seed(0 )
UpperCamelCase : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase : Optional[Any] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , )
torch.manual_seed(0 )
UpperCamelCase : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase : int = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
UpperCamelCase : Optional[int] = ClapTextModelWithProjection(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
UpperCamelCase : Tuple = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase : Tuple = SpeechTaHifiGan(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ):
if str(SCREAMING_SNAKE_CASE_ ).startswith("""mps""" ):
UpperCamelCase : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase : Any = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def a_ ( self ):
UpperCamelCase : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase : Any = self.get_dummy_components()
UpperCamelCase : int = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = audioldm_pipe(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = output.audios[0]
assert audio.ndim == 1
assert len(SCREAMING_SNAKE_CASE_ ) == 256
UpperCamelCase : Tuple = audio[:10]
UpperCamelCase : Dict = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def a_ ( self ):
UpperCamelCase : str = self.get_dummy_components()
UpperCamelCase : Tuple = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = 3 * [inputs["""prompt"""]]
# forward
UpperCamelCase : List[Any] = audioldm_pipe(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = output.audios[0]
UpperCamelCase : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = 3 * [inputs.pop("""prompt""" )]
UpperCamelCase : List[str] = audioldm_pipe.tokenizer(
SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , )
UpperCamelCase : Optional[int] = text_inputs["""input_ids"""].to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = audioldm_pipe.text_encoder(
SCREAMING_SNAKE_CASE_ , )
UpperCamelCase : str = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCamelCase : Optional[int] = F.normalize(SCREAMING_SNAKE_CASE_ , dim=-1 )
UpperCamelCase : Tuple = prompt_embeds
# forward
UpperCamelCase : List[str] = audioldm_pipe(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = output.audios[0]
        assert np.abs(audio_1 - audio_2 ).max() < 1e-2
def a_ ( self ):
UpperCamelCase : List[str] = self.get_dummy_components()
UpperCamelCase : List[Any] = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = 3 * ["""this is a negative prompt"""]
UpperCamelCase : List[Any] = negative_prompt
UpperCamelCase : str = 3 * [inputs["""prompt"""]]
# forward
UpperCamelCase : str = audioldm_pipe(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = output.audios[0]
UpperCamelCase : Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = 3 * [inputs.pop("""prompt""" )]
UpperCamelCase : List[Any] = []
for p in [prompt, negative_prompt]:
UpperCamelCase : int = audioldm_pipe.tokenizer(
SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , )
UpperCamelCase : Union[str, Any] = text_inputs["""input_ids"""].to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = audioldm_pipe.text_encoder(
SCREAMING_SNAKE_CASE_ , )
UpperCamelCase : Tuple = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCamelCase : Optional[int] = F.normalize(SCREAMING_SNAKE_CASE_ , dim=-1 )
embeds.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase , UpperCamelCase : Tuple = embeds
# forward
UpperCamelCase : List[Any] = audioldm_pipe(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = output.audios[0]
        assert np.abs(audio_1 - audio_2 ).max() < 1e-2
def a_ ( self ):
UpperCamelCase : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase : Optional[int] = self.get_dummy_components()
UpperCamelCase : List[str] = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = """egg cracking"""
UpperCamelCase : List[Any] = audioldm_pipe(**SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = output.audios[0]
assert audio.ndim == 1
assert len(SCREAMING_SNAKE_CASE_ ) == 256
UpperCamelCase : Union[str, Any] = audio[:10]
UpperCamelCase : Dict = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def a_ ( self ):
UpperCamelCase : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase : Union[str, Any] = self.get_dummy_components()
UpperCamelCase : Tuple = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
UpperCamelCase : List[Any] = audioldm_pipe(SCREAMING_SNAKE_CASE_ , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
UpperCamelCase : Dict = 2
UpperCamelCase : List[str] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
UpperCamelCase : List[str] = 2
UpperCamelCase : Optional[Any] = audioldm_pipe(SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , num_waveforms_per_prompt=SCREAMING_SNAKE_CASE_ ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
UpperCamelCase : Any = 2
UpperCamelCase : str = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=SCREAMING_SNAKE_CASE_ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def a_ ( self ):
UpperCamelCase : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase : Tuple = self.get_dummy_components()
UpperCamelCase : Tuple = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = audioldm_pipe.vocoder.config.sampling_rate
UpperCamelCase : List[str] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = audioldm_pipe(audio_length_in_s=0.016 , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = output.audios[0]
assert audio.ndim == 1
assert len(SCREAMING_SNAKE_CASE_ ) / vocoder_sampling_rate == 0.016
UpperCamelCase : Optional[Any] = audioldm_pipe(audio_length_in_s=0.032 , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = output.audios[0]
assert audio.ndim == 1
assert len(SCREAMING_SNAKE_CASE_ ) / vocoder_sampling_rate == 0.032
def a_ ( self ):
UpperCamelCase : str = self.get_dummy_components()
UpperCamelCase : Optional[Any] = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = ["""hey"""]
UpperCamelCase : Dict = audioldm_pipe(SCREAMING_SNAKE_CASE_ , num_inference_steps=1 )
UpperCamelCase : str = output.audios.shape
assert audio_shape == (1, 256)
UpperCamelCase : Optional[Any] = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
UpperCamelCase : str = SpeechTaHifiGan(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = audioldm_pipe(SCREAMING_SNAKE_CASE_ , num_inference_steps=1 )
UpperCamelCase : List[str] = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def a_ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
self._test_inference_batch_single_identical(test_mean_pixel_difference=SCREAMING_SNAKE_CASE_ )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def a_ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE_ )
@slow
class lowerCamelCase ( unittest.TestCase ):
def a_ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="cpu" , SCREAMING_SNAKE_CASE_=torch.floataa , SCREAMING_SNAKE_CASE_=0 ):
UpperCamelCase : str = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = np.random.RandomState(SCREAMING_SNAKE_CASE_ ).standard_normal((1, 8, 128, 16) )
UpperCamelCase : int = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def a_ ( self ):
UpperCamelCase : Optional[int] = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
UpperCamelCase : List[Any] = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = self.get_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = 25
UpperCamelCase : Optional[Any] = audioldm_pipe(**SCREAMING_SNAKE_CASE_ ).audios[0]
assert audio.ndim == 1
assert len(SCREAMING_SNAKE_CASE_ ) == 8_1920
UpperCamelCase : Union[str, Any] = audio[7_7230:7_7240]
UpperCamelCase : Optional[Any] = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
UpperCamelCase : Any = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def a_ ( self ):
UpperCamelCase : Any = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
UpperCamelCase : Any = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
UpperCamelCase : str = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = self.get_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = audioldm_pipe(**SCREAMING_SNAKE_CASE_ ).audios[0]
assert audio.ndim == 1
assert len(SCREAMING_SNAKE_CASE_ ) == 8_1920
UpperCamelCase : Union[str, Any] = audio[2_7780:2_7790]
UpperCamelCase : Tuple = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
UpperCamelCase : Tuple = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
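The slow tests above boil down to the following inference sketch; cvssp/audioldm is the checkpoint they load, and the pipeline returns 16 kHz waveforms as NumPy arrays:

import scipy.io.wavfile
from diffusers import AudioLDMPipeline

pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10, audio_length_in_s=5.0).audios[0]
scipy.io.wavfile.write("hammer.wav", rate=16000, data=audio)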
| 27 | 0 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
"""simple docstring"""
a : bool =field(default=lowercase__ , metadata={"help": "Whether to use SortishSampler or not."} )
a : bool =field(
default=lowercase__ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
a : Optional[int] =field(
default=lowercase__ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
a : Optional[int] =field(
default=lowercase__ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
a : Optional[Union[str, Path, GenerationConfig]] =field(
default=lowercase__ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = super().to_dict()
for k, v in d.items():
if isinstance(_a , _a ):
lowerCAmelCase : Tuple = v.to_dict()
return d
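A short construction sketch for the arguments class above (the output directory is a placeholder):

from transformers import Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    output_dir="./out",           # placeholder
    predict_with_generate=True,   # compute generative metrics via generate()
    generation_max_length=128,
    generation_num_beams=4,
)
print(args.to_dict()["generation_max_length"])  # the to_dict override above keeps these serializable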
| 108 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
__lowerCAmelCase = (
"""https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)
__lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Optional[int] = 'https://pypi.org/pypi/diffusers/json'
_a : int = json.loads(request.urlopen(__a ).read() )['releases'].keys()
return sorted(__a , key=lambda __a : version.Version(__a ) )
def UpperCAmelCase_ ():
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(__a )
os.makedirs(__a , exist_ok=__a )
_a : str = Path(__a ) / '__init__.py'
if not init_path.exists():
init_path.touch()
def UpperCAmelCase_ (__a : Union[str, os.PathLike] ):
"""simple docstring"""
init_hf_modules()
_a : Dict = Path(__a ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(__a , exist_ok=__a )
_a : Optional[int] = dynamic_module_path / '__init__.py'
if not init_path.exists():
init_path.touch()
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
with open(__a , 'r' , encoding='utf-8' ) as f:
_a : int = f.read()
# Imports of the form `import .xxx`
    _a : Tuple = re.findall(r'^\s*import\s+\.(\S+)\s*$' , __a , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import' , __a , flags=re.MULTILINE )
# Unique-ify
return list(set(__a ) )
def UpperCAmelCase_ (__a : Any ):
"""simple docstring"""
_a : Optional[int] = False
_a : Optional[int] = [module_file]
_a : List[str] = []
# Let's recurse through all relative imports
while not no_change:
_a : str = []
for f in files_to_check:
new_imports.extend(get_relative_imports(__a ) )
_a : Union[str, Any] = Path(__a ).parent
_a : str = [str(module_path / m ) for m in new_imports]
_a : Tuple = [f for f in new_import_files if f not in all_relative_imports]
_a : Dict = [f"""{f}.py""" for f in new_import_files]
_a : List[str] = len(__a ) == 0
all_relative_imports.extend(__a )
return all_relative_imports
def UpperCAmelCase_ (__a : Tuple ):
"""simple docstring"""
with open(__a , 'r' , encoding='utf-8' ) as f:
_a : Dict = f.read()
# Imports of the form `import xxx`
    _a : Optional[int] = re.findall(r'^\s*import\s+(\S+)\s*$' , __a , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r'^\s*from\s+(\S+)\s+import' , __a , flags=re.MULTILINE )
# Only keep the top-level module
_a : List[str] = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
# Unique-ify and test we got them all
_a : Optional[int] = list(set(__a ) )
_a : List[str] = []
for imp in imports:
try:
importlib.import_module(__a )
except ImportError:
missing_packages.append(__a )
if len(__a ) > 0:
raise ImportError(
'This modeling file requires the following packages that were not found in your environment: '
f"""{', '.join(__a )}. Run `pip install {' '.join(__a )}`""" )
return get_relative_imports(__a )
def UpperCAmelCase_ (__a : Any , __a : str ):
"""simple docstring"""
_a : Any = module_path.replace(os.path.sep , '.' )
_a : Union[str, Any] = importlib.import_module(__a )
if class_name is None:
return find_pipeline_class(__a )
return getattr(__a , __a )
def UpperCAmelCase_ (__a : Optional[int] ):
"""simple docstring"""
from ..pipelines import DiffusionPipeline
_a : List[str] = dict(inspect.getmembers(__a , inspect.isclass ) )
_a : str = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , __a )
and cls.__module__.split('.' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
f""" {loaded_module}.""" )
_a : Any = cls
return pipeline_class
def UpperCAmelCase_ (__a : Union[str, os.PathLike] , __a : str , __a : Optional[Union[str, os.PathLike]] = None , __a : bool = False , __a : bool = False , __a : Optional[Dict[str, str]] = None , __a : Optional[Union[bool, str]] = None , __a : Optional[str] = None , __a : bool = False , ):
"""simple docstring"""
_a : str = str(__a )
_a : Optional[Any] = os.path.join(__a , __a )
if os.path.isfile(__a ):
_a : Tuple = module_file_or_url
_a : Optional[Any] = 'local'
elif pretrained_model_name_or_path.count('/' ) == 0:
_a : int = get_diffusers_versions()
# cut ".dev0"
_a : Any = 'v' + '.'.join(__version__.split('.' )[:3] )
# retrieve github version that matches
if revision is None:
_a : Any = latest_version if latest_version[1:] in available_versions else 'main'
logger.info(f"""Defaulting to latest_version: {revision}.""" )
elif revision in available_versions:
_a : Any = f"""v{revision}"""
elif revision == "main":
_a : Optional[int] = revision
else:
raise ValueError(
f"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
f""" {', '.join(available_versions + ['main'] )}.""" )
# community pipeline on GitHub
_a : Tuple = COMMUNITY_PIPELINES_URL.format(revision=__a , pipeline=__a )
try:
_a : Any = cached_download(
__a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , )
_a : List[Any] = 'git'
_a : Any = pretrained_model_name_or_path + '.py'
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
else:
try:
# Load from URL or cache if already cached
_a : Optional[Any] = hf_hub_download(
__a , __a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , )
_a : List[Any] = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) )
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
# Check we have all the requirements in our environment
_a : Optional[int] = check_imports(__a )
# Now we move the module inside our cached dynamic modules.
_a : Optional[Any] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(__a )
_a : Any = Path(__a ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(__a , submodule_path / module_file )
for module_needed in modules_needed:
_a : Dict = f"""{module_needed}.py"""
shutil.copy(os.path.join(__a , __a ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(__a , __a ):
_a : Optional[Any] = use_auth_token
elif use_auth_token is True:
_a : List[Any] = HfFolder.get_token()
else:
_a : Dict = None
_a : int = model_info(__a , revision=__a , token=__a ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
_a : Optional[int] = submodule_path / commit_hash
_a : str = full_submodule + os.path.sep + commit_hash
create_dynamic_module(__a )
if not (submodule_path / module_file).exists():
shutil.copy(__a , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
__a , f"""{module_needed}.py""" , cache_dir=__a , force_download=__a , resume_download=__a , proxies=__a , use_auth_token=__a , revision=__a , local_files_only=__a , )
return os.path.join(__a , __a )
def UpperCAmelCase_ (__a : Union[str, os.PathLike] , __a : str , __a : Optional[str] = None , __a : Optional[Union[str, os.PathLike]] = None , __a : bool = False , __a : bool = False , __a : Optional[Dict[str, str]] = None , __a : Optional[Union[bool, str]] = None , __a : Optional[str] = None , __a : bool = False , **__a : str , ):
"""simple docstring"""
_a : Dict = get_cached_module_file(
__a , __a , cache_dir=__a , force_download=__a , resume_download=__a , proxies=__a , use_auth_token=__a , revision=__a , local_files_only=__a , )
return get_class_in_module(__a , final_module.replace('.py' , '' ) )
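These helpers are what DiffusionPipeline.from_pretrained(..., custom_pipeline=...) calls under the hood. A hedged sketch, assuming a community pipeline that lives under examples/community/ on the diffusers GitHub tree fetched above:

from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="lpw_stable_diffusion",  # fetched via COMMUNITY_PIPELINES_URL above
)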
| 271 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[Any] = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _lowerCamelCase( _a ):
lowercase_ : Dict = """unispeech"""
def __init__( self, lowerCamelCase=32, lowerCamelCase=7_68, lowerCamelCase=12, lowerCamelCase=12, lowerCamelCase=30_72, lowerCamelCase="gelu", lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=0.0, lowerCamelCase=0.0, lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=0.0_2, lowerCamelCase=1E-5, lowerCamelCase="group", lowerCamelCase="gelu", lowerCamelCase=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12), lowerCamelCase=(5, 2, 2, 2, 2, 2, 2), lowerCamelCase=(10, 3, 3, 3, 3, 2, 2), lowerCamelCase=False, lowerCamelCase=1_28, lowerCamelCase=16, lowerCamelCase=False, lowerCamelCase=True, lowerCamelCase=0.0_5, lowerCamelCase=10, lowerCamelCase=2, lowerCamelCase=0.0, lowerCamelCase=10, lowerCamelCase=0, lowerCamelCase=3_20, lowerCamelCase=2, lowerCamelCase=0.1, lowerCamelCase=1_00, lowerCamelCase=2_56, lowerCamelCase=2_56, lowerCamelCase=0.1, lowerCamelCase="mean", lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=2_56, lowerCamelCase=80, lowerCamelCase=0, lowerCamelCase=1, lowerCamelCase=2, lowerCamelCase=0.5, **lowerCamelCase, ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase, pad_token_id=lowerCamelCase, bos_token_id=lowerCamelCase, eos_token_id=lowerCamelCase)
_lowercase : Optional[Any] = hidden_size
_lowercase : List[str] = feat_extract_norm
_lowercase : Dict = feat_extract_activation
_lowercase : List[str] = list(lowerCamelCase)
_lowercase : List[str] = list(lowerCamelCase)
_lowercase : Any = list(lowerCamelCase)
_lowercase : Optional[Any] = conv_bias
_lowercase : List[Any] = num_conv_pos_embeddings
_lowercase : Dict = num_conv_pos_embedding_groups
_lowercase : Any = len(self.conv_dim)
_lowercase : Any = num_hidden_layers
_lowercase : List[Any] = intermediate_size
_lowercase : Any = hidden_act
_lowercase : Any = num_attention_heads
_lowercase : Optional[Any] = hidden_dropout
_lowercase : Optional[Any] = attention_dropout
_lowercase : str = activation_dropout
_lowercase : List[str] = feat_proj_dropout
_lowercase : int = final_dropout
_lowercase : Optional[int] = layerdrop
_lowercase : Dict = layer_norm_eps
_lowercase : Dict = initializer_range
_lowercase : Optional[int] = num_ctc_classes
_lowercase : List[str] = vocab_size
_lowercase : Optional[Any] = do_stable_layer_norm
_lowercase : List[str] = use_weighted_layer_sum
_lowercase : Any = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowercase : str = apply_spec_augment
_lowercase : Tuple = mask_time_prob
_lowercase : Dict = mask_time_length
_lowercase : Tuple = mask_time_min_masks
_lowercase : List[Any] = mask_feature_prob
_lowercase : Tuple = mask_feature_length
_lowercase : Optional[int] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_lowercase : List[Any] = num_codevectors_per_group
_lowercase : Optional[int] = num_codevector_groups
_lowercase : str = contrastive_logits_temperature
_lowercase : Tuple = feat_quantizer_dropout
_lowercase : Dict = num_negatives
_lowercase : Dict = codevector_dim
_lowercase : Union[str, Any] = proj_codevector_dim
_lowercase : int = diversity_loss_weight
# ctc loss
_lowercase : Union[str, Any] = ctc_loss_reduction
_lowercase : Optional[Any] = ctc_zero_infinity
# pretraining loss
_lowercase : str = replace_prob
@property
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
return functools.reduce(operator.mul, self.conv_stride, 1)
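The final property above is the feature extractor's total downsampling factor; for the default strides it evaluates as follows:

import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # defaults from __init__ above
print(functools.reduce(operator.mul, conv_stride, 1))  # 320 input samples per output frame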
| 84 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCamelCase( _a, unittest.TestCase ):
lowercase_ : List[str] = CTRLTokenizer
lowercase_ : Union[str, Any] = False
lowercase_ : Optional[int] = False
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowercase : List[Any] = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
_lowercase : List[Any] = dict(zip(lowerCamelCase, range(len(lowerCamelCase))))
_lowercase : Optional[int] = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
_lowercase : Union[str, Any] = {'unk_token': '<unk>'}
_lowercase : int = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
_lowercase : List[str] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file, 'w', encoding='utf-8') as fp:
fp.write(json.dumps(lowerCamelCase) + '\n')
with open(self.merges_file, 'w', encoding='utf-8') as fp:
fp.write('\n'.join(lowerCamelCase))
def UpperCamelCase ( self, **lowerCamelCase) -> List[str]:
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return CTRLTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : Tuple = 'adapt react readapt apt'
_lowercase : Tuple = 'adapt react readapt apt'
return input_text, output_text
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Optional[int] = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
_lowercase : List[str] = 'adapt react readapt apt'
_lowercase : Optional[Any] = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
_lowercase : Optional[Any] = tokenizer.tokenize(lowerCamelCase)
self.assertListEqual(lowerCamelCase, lowerCamelCase)
_lowercase : List[str] = tokens + [tokenizer.unk_token]
_lowercase : int = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase), lowerCamelCase)
| 84 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
snake_case_ = TypeVar('''T''')
snake_case_ = TypeVar('''U''')
class SCREAMING_SNAKE_CASE__ (Generic[T, U] ):
def __init__( self , a , a):
lowercase__ : List[Any] = key
lowercase__ : List[Any] = val
lowercase__ : DoubleLinkedListNode[T, U] | None = None
lowercase__ : DoubleLinkedListNode[T, U] | None = None
def __repr__( self):
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next)}, has prev: {bool(self.prev)}"""
)
class SCREAMING_SNAKE_CASE__ (Generic[T, U] ):
def __init__( self):
lowercase__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(a , a)
lowercase__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(a , a)
lowercase__ , lowercase__ : Union[str, Any] = self.rear, self.head
def __repr__( self):
lowercase__ : Any = ['DoubleLinkedList']
lowercase__ : List[str] = self.head
while node.next is not None:
rep.append(str(a))
lowercase__ : Tuple = node.next
rep.append(str(self.rear))
return ",\n ".join(a)
def snake_case_ ( self , a):
lowercase__ : Optional[Any] = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
lowercase__ : Dict = node
lowercase__ : int = previous
lowercase__ : Union[str, Any] = node
lowercase__ : Optional[int] = self.rear
def snake_case_ ( self , a):
if node.prev is None or node.next is None:
return None
lowercase__ : Union[str, Any] = node.next
lowercase__ : Tuple = node.prev
lowercase__ : Union[str, Any] = None
lowercase__ : List[Any] = None
return node
class SCREAMING_SNAKE_CASE__ (Generic[T, U] ):
__lowerCamelCase : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self , a):
lowercase__ : DoubleLinkedList[T, U] = DoubleLinkedList()
lowercase__ : Optional[Any] = capacity
lowercase__ : Union[str, Any] = 0
lowercase__ : Tuple = 0
lowercase__ : int = 0
lowercase__ : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self):
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self , a):
return key in self.cache
def snake_case_ ( self , a):
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
lowercase__ : DoubleLinkedListNode[T, U] = self.cache[key]
lowercase__ : str = self.list.remove(self.cache[key])
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(a)
return node.val
self.miss += 1
return None
def snake_case_ ( self , a , a):
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
lowercase__ : Optional[int] = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
lowercase__ : Optional[Any] = DoubleLinkedListNode(a , a)
self.list.add(self.cache[key])
self.num_keys += 1
else:
# bump node to the end of the list, update value
lowercase__ : Any = self.list.remove(self.cache[key])
assert node is not None # node guaranteed to be in list
lowercase__ : Union[str, Any] = value
self.list.add(a)
@classmethod
def snake_case_ ( cls , a = 128):
def cache_decorator_inner(a) -> Callable[..., U]:
def cache_decorator_wrapper(*a) -> U:
if func not in cls.decorator_function_to_instance_map:
lowercase__ : Dict = LRUCache(a)
lowercase__ : str = cls.decorator_function_to_instance_map[func].get(args[0])
if result is None:
lowercase__ : str = func(*a)
cls.decorator_function_to_instance_map[func].put(args[0] , a)
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(a , 'cache_info' , a) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
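A usage sketch for the classmethod defined above, assuming its upstream name LRUCache.decorator; it memoizes a function keyed on its first positional argument:

@LRUCache.decorator(100)  # cache up to 100 distinct arguments
def fib(num: int) -> int:
    return 1 if num in (1, 2) else fib(num - 1) + fib(num - 2)

print(fib(30))           # 832040, via memoized recursion
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)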
| 214 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
snake_case_ = logging.getLogger(__name__)
snake_case_ = 50 # max width of layer names
snake_case_ = 70 # max width of quantizer names
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Tuple ):
'''simple docstring'''
lowercase__ : str = parser.add_argument_group('quant_trainer arguments' )
group.add_argument('--wprec' , type=SCREAMING_SNAKE_CASE_ , default=8 , help='weight precision' )
group.add_argument('--aprec' , type=SCREAMING_SNAKE_CASE_ , default=8 , help='activation precision' )
group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' )
group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' )
group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' )
group.add_argument('--quant-disable-keyword' , type=SCREAMING_SNAKE_CASE_ , nargs='+' , help='disable quantizers by keyword' )
group.add_argument('--quant-disable-layer-module' , type=SCREAMING_SNAKE_CASE_ , help='disable quantizers by keyword under layer.' )
group.add_argument('--quant-enable-layer-module' , type=SCREAMING_SNAKE_CASE_ , help='enable quantizers by keyword under layer' )
group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' )
group.add_argument('--percentile' , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , help='percentile for PercentileCalibrator' )
group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' )
group.add_argument('--clip-gelu' , metavar='N' , type=SCREAMING_SNAKE_CASE_ , help='clip gelu output maximum value to N' )
group.add_argument(
'--recalibrate-weights' , action='store_true' , help=(
'recalibrate weight amaxes by taking the max of the weights.'
' amaxes will be computed with the current quantization granularity (axis).'
) , )
def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError('Specify --percentile when using percentile calibrator')
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False, eval_model=False):
    """Function called before the training loop."""
    logger.info('Configuring Model for Quantization')
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ['embeddings'], which='weight', _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [''], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r'layer.\d+.' + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r'layer.\d+.' + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)

    if args.clip_gelu:
        clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)
def enable_calibration(model):
    """Enable calibration of all *_input_quantizer modules in model."""
    logger.info('Enabling Calibration')
    for name, module in model.named_modules():
        if name.endswith('_quantizer'):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")
def finish_calibration(model, args):
    """Disable calibration and load amax for all "*_input_quantizer" modules in model."""
    logger.info('Loading calibrated amax')
    for name, module in model.named_modules():
        if name.endswith('_quantizer'):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax('percentile', percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Adjust quantization ranges to match an implementation where the QKV projections are implemented with a single GEMM.

    Force the weight and output scale factors to match by taking the max of (Q, K, V).
    """

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, '_amax'):
                print('          WARNING: NO AMAX BUFFER')
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith('.attention.self'):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval when quantized.

    Implemented by adjusting the amax of the following input_quantizer.
    """
    for name, mod in model.named_modules():
        if name.endswith('.output.dense') and not name.endswith('attention.output.dense'):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")
def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, '_weight_quantizer') and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")
def recalibrate_weights(model):
    """Performs max calibration on the weights and updates amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, '_weight_quantizer'):
            if not hasattr(mod._weight_quantizer, '_amax'):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print model quantization configuration."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, 'weight'):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, '_input_quantizer', None)
        weight_q = getattr(mod, '_weight_quantizer', None)
        if not hasattr(mod, 'weight'):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")
def print_quant_summary(model):
    """Print summary of all quantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")
def set_quantizer(name, mod, quantizer, k, v):
    """Set attributes for mod.quantizer."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")
def set_quantizers(name, mod, which='both', **kwargs):
    """Set quantizer attributes for mod."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ['input', 'both']:
            set_quantizer(name, mod, '_input_quantizer', k, v)
        if which in ['weight', 'both']:
            set_quantizer(name, mod, '_weight_quantizer', k, v)
    logger.info(s)
def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers where name contains a substring in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, '_input_quantizer') or hasattr(mod, '_weight_quantizer'):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith('_quantizer'):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
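A sketch of how these helpers are typically wired together in a calibration run; `build_model` and `calib_dataloader` are placeholders assumed to come from the surrounding training script, not defined in this module.

# Hypothetical calibration flow using the helpers above.
set_default_quantizers(args)              # must run before the model is created
model = build_model(args)                 # placeholder for model construction
configure_model(model, args, calib=True)

enable_calibration(model)
for batch in calib_dataloader:            # feed a few batches to collect ranges
    model(**batch)
finish_calibration(model, args)           # load amax values, re-enable quantization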
| 214 | 1 |
"""simple docstring"""
def gnome_sort(lst: list) -> list:
    """Pure implementation of the gnome sort algorithm in Python.

    >>> gnome_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(gnome_sort(unsorted))
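A quick illustrative check, assuming the `gnome_sort` defined above:

# Each swap steps the index back, so an out-of-place element "bubbles" left
# one position at a time before the forward walk resumes.
assert gnome_sort([5, 3, 8, 1]) == [1, 3, 5, 8]
assert gnome_sort([]) == []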
| 132 |
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class WavaVecaPhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = WavaVecaPhonemeCTCTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab = (
"""<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """
"""ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """
"""ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """
"""oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """
"""pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """
"""yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """
"""əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """
"""ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """
"""ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """
"""uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """
"""ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """
"""ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """
"""ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"""
).split(""" """ )
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token

        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa

        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)

    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")

    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_snake_case : List[Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
_snake_case : List[Any] = tokenizer.decode(sample_ids[0] )
_snake_case : str = tokenizer.batch_decode(a_ )
self.assertEqual(a_, batch_tokens[0] )
self.assertEqual(a_, ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""", word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_snake_case : Union[str, Any] = """Hello how are you"""
_snake_case : Tuple = tokenizer.phonemize(a_, phonemizer_lang="""en-us""" )
self.assertEqual(a_, """h ə l oʊ | h aʊ | ɑːɹ | j uː |""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : List[Any] = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""", word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_snake_case : int = """Hello how are you"""
_snake_case : Dict = tokenizer.phonemize(a_, phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(a_ ).input_ids, tokenizer(a_, do_phonemize=a_ ).input_ids )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Dict = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""", word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
_snake_case : List[str] = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
_snake_case : List[str] = tokenizer.decode(sample_ids[0] )
_snake_case : Optional[int] = tokenizer.batch_decode(a_ )
self.assertEqual(a_, batch_tokens[0] )
self.assertEqual(a_, ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
# decode with no word_del_token filter
_snake_case : int = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=a_ )
_snake_case : Dict = tokenizer.batch_decode(a_, filter_word_delimiter_token=a_ )
self.assertEqual(a_, batch_tokens[0] )
self.assertEqual(a_, ["""k s ɾ | ɾ l | ɭʲ""", """| j ð | s j ð s oːɹ"""] )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Dict = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""", word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_snake_case : str = """Hello how are you"""
_snake_case : List[Any] = tokenizer.phonemize(a_, phonemizer_lang="""en-us""" )
_snake_case : Union[str, Any] = tokenizer.decode(tokenizer(a_ ).input_ids, filter_word_delimiter_token=a_ )
self.assertEqual(a_, a_ )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : str = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""", word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_snake_case : Any = """Hello how are you"""
_snake_case : Any = tokenizer.phonemize(a_, phonemizer_lang="""en-us""" )
_snake_case : Any = tokenizer.decode(tokenizer(a_ ).input_ids, filter_word_delimiter_token=a_ )
self.assertEqual(""" """.join([p.strip() for p in phonemes.split(""" |""" )] ).strip(), a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[Any] = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""", word_delimiter_token=a_ )
_snake_case : str = """Hello how are you"""
_snake_case : str = tokenizer(a_, phonemizer_lang="""en-us""" ).input_ids
_snake_case : List[str] = tokenizer(a_, phonemizer_lang="""fr-fr""" ).input_ids
self.assertNotEqual(a_, a_ )
_snake_case : Dict = tokenizer.decode(a_ )
_snake_case : Union[str, Any] = tokenizer.decode(a_ )
self.assertEqual(a_, """h ə l oʊ h aʊ ɑːɹ j uː""" )
self.assertEqual(a_, """ɛ l o h aʊ a ʁ j u""" )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_snake_case : Optional[Any] = """Hello how Are you"""
_snake_case : Optional[int] = """hello how are you"""
_snake_case : List[str] = tokenizer(a_ ).input_ids
_snake_case : int = tokenizer(a_ ).input_ids
self.assertEqual(a_, a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Tuple = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
tokenizer.add_tokens(["""!""", """?"""] )
tokenizer.add_special_tokens({"""cls_token""": """$$$"""} )
# fmt: off
_snake_case : List[Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
_snake_case : List[Any] = tokenizer.batch_decode(a_ )
self.assertEqual(a_, ["""k s ɾ ɾ l ɭʲ!?!? $$$""", """j ð s j ð s oːɹ $$$"""] )
@staticmethod
def UpperCamelCase_ ( a_: Tuple, a_: Tuple ):
'''simple docstring'''
_snake_case : List[str] = [d[key] for d in offsets]
return retrieved_list
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_tokenizer(word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
_snake_case : Optional[int] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
_snake_case : Any = tokenizer.decode(a_, output_char_offsets=a_, filter_word_delimiter_token=a_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ), 2 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""char_offsets""" in outputs )
self.assertTrue(isinstance(a_, a_ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""char_offsets"""], """char""" ) ), outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""], """char""" ), ["""k""", """s""", """ɾ""", """ɾ""", """|""", """ɾ""", """l""", """|""", """ɭʲ"""] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""], """start_offset""" ), [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""], """end_offset""" ), [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_tokenizer(word_delimiter_token="""|""" )
def check_list_tuples_equal(a_: List[str], a_: Optional[Any] ):
self.assertTrue(isinstance(a_, a_ ) )
self.assertTrue(isinstance(outputs_list[0], a_ ) )
# transform list to ModelOutput
_snake_case : int = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["""text"""], outputs_batch_a["""text"""] )
def recursive_check(a_: Union[str, Any], a_: Optional[Any] ):
if isinstance(a_, a_ ):
[recursive_check(a_, a_ ) for la, la in zip(a_, a_ )]
self.assertEqual(a_, a_ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["""char_offsets"""], outputs_batch_a["""char_offsets"""] )
# fmt: off
_snake_case : int = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
_snake_case : int = tokenizer.batch_decode(a_, output_char_offsets=a_ )
_snake_case : Union[str, Any] = [tokenizer.decode(a_, output_char_offsets=a_ ) for ids in sample_ids]
check_list_tuples_equal(a_, a_ )
@unittest.skip("""Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
pass
@unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
pass
@unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : int = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : Optional[Any] = tokenizer.vocab_size
_snake_case : int = len(a_ )
self.assertNotEqual(a_, 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_snake_case : Optional[int] = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
_snake_case : Any = tokenizer.add_tokens(a_ )
_snake_case : Tuple = tokenizer.vocab_size
_snake_case : List[str] = len(a_ )
self.assertNotEqual(a_, 0 )
self.assertEqual(a_, a_ )
self.assertEqual(a_, len(a_ ) )
self.assertEqual(a_, all_size + len(a_ ) )
_snake_case : Optional[Any] = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""", add_special_tokens=a_ )
self.assertGreaterEqual(len(a_ ), 4 )
self.assertGreater(tokens[0], tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3], tokenizer.vocab_size - 1 )
_snake_case : Optional[int] = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
_snake_case : Dict = tokenizer.add_special_tokens(a_ )
_snake_case : List[str] = tokenizer.vocab_size
_snake_case : List[Any] = len(a_ )
self.assertNotEqual(a_, 0 )
self.assertEqual(a_, a_ )
self.assertEqual(a_, len(a_ ) )
self.assertEqual(a_, all_size_a + len(a_ ) )
_snake_case : Dict = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""", add_special_tokens=a_ )
self.assertGreaterEqual(len(a_ ), 6 )
self.assertGreater(tokens[0], tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0], tokens[1] )
self.assertGreater(tokens[-3], tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3], tokens[-4] )
self.assertEqual(tokens[0], tokenizer.eos_token_id )
self.assertEqual(tokens[-3], tokenizer.pad_token_id )
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : str = self.get_tokenizers(fast=a_, do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : str = ["""ð""", """ɪ""", """s""", """ɪ""", """z""", """ɐ""", """t""", """ɛ""", """k""", """s""", """t"""]
_snake_case : Optional[Any] = tokenizer.convert_tokens_to_string(a_ )
self.assertIsInstance(output["""text"""], a_ )
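A hedged sketch of the char-offset decoding these tests exercise; the checkpoint name comes from the tests above, the import uses the tokenizer's real `transformers` name, and interpreting offsets as model frames (rather than seconds) is an assumption about downstream use.

# Hypothetical downstream use of output_char_offsets (requires the phonemizer
# backend and network access to the checkpoint).
from transformers import Wav2Vec2PhonemeCTCTokenizer

tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
ids = tokenizer("Hello how are you").input_ids
out = tokenizer.decode(ids, output_char_offsets=True)
for offset in out.char_offsets:
    # start_offset/end_offset count token positions; converting to seconds
    # requires the acoustic model's frame stride (assumption).
    print(offset["char"], offset["start_offset"], offset["end_offset"])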
| 132 | 1 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
def _UpperCAmelCase ( self , a=1_5 ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__ : Tuple = self.rust_tokenizer_class.from_pretrained(a , **a )
# Simple input
lowercase__ : Tuple = 'This is a simple input'
lowercase__ : List[str] = ['This is a simple input 1', 'This is a simple input 2']
lowercase__ : Optional[int] = ('This is a simple input', 'This is a pair')
lowercase__ : List[Any] = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding='max_length' )
# Simple input
self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding='max_length' )
# Simple input
self.assertRaises(
a , tokenizer_r.batch_encode_plus , a , max_length=a , padding='max_length' , )
# Pair input
self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding='max_length' )
# Pair input
self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding='max_length' )
# Pair input
self.assertRaises(
a , tokenizer_r.batch_encode_plus , a , max_length=a , padding='max_length' , )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Union[str, Any] = 'Hello World!'
lowercase__ : Optional[int] = [1_2_6, 3_2, 2_6_2, 1_5_2, 3_8, 7_2, 2_8_7]
self.assertListEqual(a , self.big_tokenizer.encode(a ) )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : List[str] = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
lowercase__ : int = [
1_0_8,
2_6_5,
2_4,
1_1_1,
4,
2_5_8,
1_5_6,
3_5,
2_8,
2_7_5,
3,
2_5_9,
2_9_7,
2_6_0,
8_4,
4,
3_5,
1_1_0,
4_4,
8,
2_5_9,
9_1,
2_6_8,
2_1,
1_1,
2_0_9,
2_7_4,
1_0_9,
2_6_6,
2_7_7,
1_1_7,
8_6,
9_3,
3_1_5,
2_5_8,
2_7_8,
2_5_8,
2_7_7,
2_5_8,
0,
2_5_8,
2_8_8,
2_5_8,
3_1_9,
2_5_8,
0,
2_5_8,
0,
2_5_8,
0,
2_5_8,
0,
2_5_8,
2_8_7,
2_5_8,
3_1_5,
2_5_8,
2_8_9,
2_5_8,
2_7_8,
9_9,
2_6_9,
2_6_6,
2_6_2,
8,
2_5_9,
2_4_1,
4,
2_1_7,
2_3_0,
2_6_8,
2_6_6,
5_5,
1_6_8,
1_0_6,
7_5,
1_9_3,
2_6_6,
2_2_3,
2_7,
4_9,
2_6,
2_8_2,
2_5,
2_6_4,
2_9_9,
1_9,
2_6,
0,
2_5_8,
2_7_7,
1_1_7,
8_6,
9_3,
1_7_6,
1_8_3,
2_7_0,
1_1,
2_6_2,
4_2,
6_1,
2_6_5,
]
self.assertListEqual(a , self.big_tokenizer.encode(a ) )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
lowercase__ : str = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
lowercase__ : List[Any] = ' '.join(a )
lowercase__ : List[Any] = self.big_tokenizer.encode_plus(a , return_tensors='pt' )
lowercase__ : Optional[int] = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='pt' )
lowercase__ : Optional[int] = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
lowercase__ : Dict = encoded_sequence['input_ids'].shape
lowercase__ : Dict = ReformerModel(a )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**a )
model(**a )
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
# fmt: off
lowercase__ : Optional[Any] = {'input_ids': [[1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 7, 5_1, 2_7_9, 5_8, 7, 7_6, 2_5, 6_9, 2_7_8], [1_4_0, 2_4_3, 2_6_4, 1_3_4, 1_7, 2_6_7, 7_7, 2_6_3, 2_2, 2_6_2, 2_9_7, 2_5_8, 3_0_4, 1_7_7, 2_7_9, 2_6_6, 1_4, 8_9, 1_3, 3_5, 2_6_1, 2_9_9, 2_7_2, 1_3_7, 2_7_5, 2_7_8]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
lowercase__ : List[Any] = [
'This is a very simple sentence.',
'The quick brown fox jumps over the lazy dog.',
]
self.tokenizer_integration_test_util(
expected_encoding=a , model_name='google/reformer-crime-and-punishment' , revision='0e6c3decb8211d49bf881013425dc8b0448b3f5a' , padding=a , sequences=a , )
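A hedged round-trip sketch matching the slow test above; it requires the sentencepiece backend and network access to the checkpoint, and the expected ids are taken from that test.

from transformers import ReformerTokenizer

tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tok.encode("Hello World!")
print(ids)              # [126, 32, 262, 152, 38, 72, 287] per the @slow test above
print(tok.decode(ids))  # should reproduce "Hello World!"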
| 77 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels

        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels

        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128

        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskaformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''')
def UpperCAmelCase__ ( self : Optional[Any]):
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''')
def UpperCAmelCase__ ( self : str):
pass
@unittest.skip(reason='''Mask2Former is not a generative model''')
def UpperCAmelCase__ ( self : int):
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''')
def UpperCAmelCase__ ( self : Tuple):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''')
def UpperCAmelCase__ ( self : List[str]):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def UpperCAmelCase__ ( self : Tuple):
pass
def UpperCAmelCase__ ( self : Dict):
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Union[str, Any] = model_class(A_)
lowerCAmelCase_ : Dict = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : Tuple = [*signature.parameters.keys()]
lowerCAmelCase_ : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A_)
@slow
def UpperCAmelCase__ ( self : List[str]):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
lowerCAmelCase_ : List[str] = MaskaFormerModel.from_pretrained(A_)
self.assertIsNotNone(A_)
def UpperCAmelCase__ ( self : Optional[Any]):
lowerCAmelCase_ : Optional[int] = (self.model_tester.min_size,) * 2
lowerCAmelCase_ : str = {
'''pixel_values''': torch.randn((2, 3, *size) , device=A_),
'''mask_labels''': torch.randn((2, 1_0, *size) , device=A_),
'''class_labels''': torch.zeros(2 , 1_0 , device=A_).long(),
}
lowerCAmelCase_ : Union[str, Any] = self.model_tester.get_config()
lowerCAmelCase_ : Any = MaskaFormerForUniversalSegmentation(A_).to(A_)
lowerCAmelCase_ : Union[str, Any] = model(**A_)
self.assertTrue(outputs.loss is not None)
def UpperCAmelCase__ ( self : Tuple):
lowerCAmelCase_ , lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ , **A_ , output_hidden_states=A_)
def UpperCAmelCase__ ( self : Union[str, Any]):
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Union[str, Any] = model_class(A_).to(A_)
lowerCAmelCase_ : List[str] = model(**A_ , output_attentions=A_)
self.assertTrue(outputs.attentions is not None)
def UpperCAmelCase__ ( self : List[Any]):
if not self.model_tester.is_training:
return
lowerCAmelCase_ : Dict = self.all_model_classes[1]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase_ : Optional[Any] = model_class(A_)
model.to(A_)
model.train()
lowerCAmelCase_ : List[Any] = model(A_ , mask_labels=A_ , class_labels=A_).loss
loss.backward()
def UpperCAmelCase__ ( self : str):
lowerCAmelCase_ : str = self.all_model_classes[1]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : Dict = True
lowerCAmelCase_ : Any = model_class(A_).to(A_)
model.train()
lowerCAmelCase_ : List[str] = model(A_ , mask_labels=A_ , class_labels=A_)
lowerCAmelCase_ : int = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCAmelCase_ : str = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
lowerCAmelCase_ : Dict = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCAmelCase_ : str = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=A_)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
def UpperCAmelCase__ ( self : int):
lowerCAmelCase_ : Any = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(A_)
lowerCAmelCase_ : str = self.default_image_processor
lowerCAmelCase_ : Tuple = prepare_img()
lowerCAmelCase_ : Any = image_processor(A_ , return_tensors='''pt''').to(A_)
lowerCAmelCase_ : str = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
# check size
self.assertEqual(A_ , (1, 3, 3_8_4, 3_8_4))
with torch.no_grad():
lowerCAmelCase_ : Optional[Any] = model(**A_)
lowerCAmelCase_ : List[str] = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]).to(A_)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , A_ , atol=A_))
lowerCAmelCase_ : Union[str, Any] = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]).to(A_)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , A_ , atol=A_))
lowerCAmelCase_ : Optional[int] = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]).to(A_)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , A_ , atol=A_))
def UpperCAmelCase__ ( self : List[Any]):
lowerCAmelCase_ : List[str] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(A_).eval()
lowerCAmelCase_ : Optional[int] = self.default_image_processor
lowerCAmelCase_ : List[Any] = prepare_img()
lowerCAmelCase_ : Tuple = image_processor(A_ , return_tensors='''pt''').to(A_)
lowerCAmelCase_ : Union[str, Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
# check size
self.assertEqual(A_ , (1, 3, 3_8_4, 3_8_4))
with torch.no_grad():
lowerCAmelCase_ : Tuple = model(**A_)
# masks_queries_logits
lowerCAmelCase_ : int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
lowerCAmelCase_ : Tuple = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
lowerCAmelCase_ : Optional[Any] = torch.tensor(A_).to(A_)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , A_ , atol=A_))
# class_queries_logits
lowerCAmelCase_ : Dict = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1))
lowerCAmelCase_ : Any = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
]).to(A_)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , A_ , atol=A_))
def UpperCAmelCase__ ( self : Any):
lowerCAmelCase_ : List[str] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(A_).eval()
lowerCAmelCase_ : Optional[Any] = self.default_image_processor
lowerCAmelCase_ : Optional[Any] = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3)), np.zeros((3, 8_0_0, 1_3_3_3))] , segmentation_maps=[np.zeros((3_8_4, 3_8_4)).astype(np.floataa), np.zeros((3_8_4, 3_8_4)).astype(np.floataa)] , return_tensors='''pt''' , )
lowerCAmelCase_ : Dict = inputs['''pixel_values'''].to(A_)
lowerCAmelCase_ : Tuple = [el.to(A_) for el in inputs['''mask_labels''']]
lowerCAmelCase_ : str = [el.to(A_) for el in inputs['''class_labels''']]
with torch.no_grad():
lowerCAmelCase_ : int = model(**A_)
self.assertTrue(outputs.loss is not None)
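A hedged end-to-end sketch of the inference path exercised by the integration tests above; the checkpoint name and image path come from those tests, and the imports use the real `transformers` class names.

# Hypothetical standalone inference run (requires torch, PIL, and network
# access to the checkpoint).
import torch
from PIL import Image
from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerImageProcessor

name = "facebook/mask2former-swin-small-coco-instance"
processor = Mask2FormerImageProcessor.from_pretrained(name)
model = Mask2FormerForUniversalSegmentation.from_pretrained(name).eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# masks are predicted at 1/4 of the padded input resolution, as the tests assert
print(outputs.masks_queries_logits.shape, outputs.class_queries_logits.shape)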
| 103 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    r"""
    Constructs a MobileViT image processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image so that its shortest edge equals ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center crop an image to size ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values of an image by ``scale``."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        """Flip the channel order (RGB <-> BGR) of an image."""
        return flip_channel_order(image, data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
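A brief usage sketch for the processor above; the input image path is a hypothetical placeholder.

# Hedged example: preprocess one image with the defaults (resize shortest
# edge to 224, center crop to 256x256, rescale, flip RGB -> BGR).
from PIL import Image

processor = MobileViTImageProcessor()
image = Image.open("example.jpg")            # hypothetical input image
batch = processor.preprocess(image, return_tensors="pt")
print(batch["pixel_values"].shape)           # (1, 3, 256, 256) after the center crop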
| 259 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
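A minimal sketch showing how `attribute_map` aliases work on the config above; the chosen sizes are illustrative.

config = DistilBertConfig(n_layers=3, dim=384, hidden_dim=4 * 384)
print(config.num_hidden_layers)  # 3, aliased to n_layers via attribute_map
print(config.hidden_size)        # 384, aliased to dim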
| 259 | 1 |