import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because it should only be run when releasing a minor transformers version",
)
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # create the HuggingFace estimator for a single-node training job
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator and run training
        estimator = self.create_estimator()
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from the SageMaker job; this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump test results into a json file to share in the PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
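# Added note: `parameterized_class` injects each dict's keys (framework, script,
# model_name_or_path, instance_type, results) as class attributes, which is why
# the test can read e.g. `self.framework` and `self.results` without defining
# them; the `sm_env` fixture referenced above is expected to supply `self.env`.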
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of two binary input values."""
    return int((input_1, input_2).count(1) != 0)
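# Added note: the tuple trick above, (input_1, input_2).count(1) != 0, is True
# whenever at least one input is 1, so for 0/1 inputs it is equivalent to plain
# boolean logic. A minimal alternative sketch (not part of the original module):
def or_gate_via_bool(input_1: int, input_2: int) -> int:
    return int(bool(input_1) or bool(input_2))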
def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # transcribe the audio into a text prompt with Whisper
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
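# A rough usage sketch for this community pipeline (added illustration; the
# checkpoint names are assumptions, and the wiring simply mirrors the __init__
# signature above):
#
#     from transformers import WhisperForConditionalGeneration, WhisperProcessor
#
#     speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
#     speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
#     # ...load vae, text_encoder, tokenizer, unet, scheduler, safety_checker and
#     # feature_extractor from a Stable Diffusion checkpoint, then:
#     pipe = SpeechToImagePipeline(
#         speech_model, speech_processor, vae, text_encoder, tokenizer, unet,
#         scheduler, safety_checker, feature_extractor,
#     )
#     image = pipe(audio_array, sampling_rate=16_000).images[0]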
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
    raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
    raise Exception("requires fairseq < v2")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
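# Example invocation (added; the script filename and paths are illustrative):
#
#     python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#         --xmod_checkpoint_path /path/to/xmod.base/model.pt \
#         --pytorch_dump_folder_path ./xmod-base
#
# The converter expects `sentencepiece.bpe.model` next to the checkpoint and a
# fairseq `data_bin/dict.txt` dictionary, as wired up in
# convert_xmod_checkpoint_to_pytorch above.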
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return the product u * (u - 1) * ... * (u - p + 1)."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
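# Added note: ucal(u, p) is the falling product u * (u - 1) * ... * (u - p + 1),
# the numerator of the binomial coefficient in Newton's forward-difference
# formula f(x) ≈ sum_i [u(u-1)...(u-i+1) / i!] * Δ^i y_0, with u = (x - x_0) / h.
# For example, ucal(1.5, 3) = 1.5 * 0.5 * (-0.5) = -0.375.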
def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    @require_torch
    def test_prophetnet_default_batch_encoding(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
def catalan(number: int) -> int:
    """
    Calculate the nth Catalan number.

    >>> catalan(5)
    14
    >>> catalan(0)
    Traceback (most recent call last):
        ...
    ValueError: Input value of [number=0] must be > 0
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
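# Added note: the loop applies the recurrence C(k) = C(k-1) * (4k - 2) / (k + 1),
# so catalan(n) is the n-th Catalan number with catalan(1) == 1; the sequence
# runs 1, 1, 2, 5, 14, 42, ... (catalan(5) == 14, matching the doctest above).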
if __name__ == "__main__":
import doctest
doctest.testmod()
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate entropy of a pre-softmax logit Tensor"""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
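# Added note: for logits x with Z = sum_i exp(x_i), the softmax distribution
# p_i = exp(x_i) / Z has entropy
#     H(p) = -sum_i p_i * log(p_i) = log(Z) - (sum_i x_i * exp(x_i)) / Z,
# which is exactly log(A) - B / A above. Sanity check: entropy(torch.zeros(1, 4))
# gives log(4), the entropy of a uniform distribution over four classes.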
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)

        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits


@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prunes heads of the model."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits


class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A module to provide a shortcut from an intermediate BertLayer's output to early classification."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output


@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
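# A minimal inference sketch (added; the checkpoint path and threshold are
# assumptions, not from the source). Early exit is driven by the per-layer
# entropy thresholds set on the encoder:
#
#     model = DeeBertForSequenceClassification.from_pretrained(checkpoint_dir)
#     model.bert.encoder.set_early_exit_entropy(0.5)  # exit once a highway's entropy < 0.5
#     model.eval()
#     outputs = model(input_ids)  # logits come from the first sufficiently confident layer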
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
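# Example (added): with the environment command registered above, running
#
#     diffusers-cli env
#
# prints platform and library version information for bug reports.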
if __name__ == "__main__":
main()
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # keep the backend normalizer in sync with the requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
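# A brief usage sketch (added illustration; the checkpoint is one of the IDs
# listed in PRETRAINED_VOCAB_FILES_MAP above):
#
#     tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#     encoding = tokenizer("Hello, ConvBERT!", return_tensors="pt")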
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : List[str] = ProphetNetTokenizer
lowerCAmelCase__ : str = False
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
super().setUp()
__lowercase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def a__ ( self : str , _UpperCAmelCase : Any ) -> List[str]:
"""simple docstring"""
__lowercase = 'UNwant\u00E9d,running'
__lowercase = 'unwanted, running'
return input_text, output_text
def a__ ( self : Any ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [9, 6, 7, 12, 10, 11] )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Dict ) -> str:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : str ) -> Dict:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def a__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__lowercase = {}
for i, token in enumerate(_UpperCAmelCase ):
__lowercase = i
__lowercase = WordpieceTokenizer(vocab=_UpperCAmelCase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def a__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__lowercase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__lowercase = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
__lowercase = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='pt' )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def a__ ( self : int ) -> Dict:
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def a__ ( self : Any ) -> List[str]:
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__lowercase = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCAmelCase )
__lowercase = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
assert encoded_sentence == text + [1_02]
assert encoded_pair == text + [1_02] + text_a + [1_02]
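# A minimal usage sketch (added for illustration, not part of the original test
# file; it assumes the same public checkpoint the tests above load). It shows the
# special-token layout asserted above, where 1_02 is the [SEP] id appended by
# build_inputs_with_special_tokens.
if __name__ == "__main__":
    from transformers import ProphetNetTokenizer

    tokenizer = ProphetNetTokenizer.from_pretrained('microsoft/prophetnet-large-uncased')
    single = tokenizer('sequence builders')['input_ids']  # token ids followed by [102]
    pair = tokenizer('sequence builders', 'multi-sequence build')['input_ids']
    # pair == tokens_a + [102] + tokens_b + [102], matching the assertions above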
| 688 | 1 |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class A__ ( lowerCAmelCase__ ):
def __init__( self : List[str] , _UpperCAmelCase : str="" , _UpperCAmelCase : int="train" ) -> Tuple:
"""simple docstring"""
assert os.path.isdir(_UpperCAmelCase )
__lowercase = []
__lowercase = os.listdir(_UpperCAmelCase )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
__lowercase = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if not os.path.isfile(_UpperCAmelCase ):
continue
self.documents.append(_UpperCAmelCase )
def __len__( self : List[str] ) -> List[str]:
"""simple docstring"""
return len(self.documents )
def __getitem__( self : int , _UpperCAmelCase : Dict ) -> List[str]:
"""simple docstring"""
__lowercase = self.documents[idx]
__lowercase = document_path.split('/' )[-1]
with open(_UpperCAmelCase , encoding='utf-8' ) as source:
__lowercase = source.read()
__lowercase , __lowercase = process_story(_UpperCAmelCase )
return document_name, story_lines, summary_lines
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any ) -> Dict:
__lowercase = list(filter(lambda SCREAMING_SNAKE_CASE : len(SCREAMING_SNAKE_CASE ) != 0 , [line.strip() for line in raw_story.split('\n' )] ) )
    # for some unknown reason some lines are missing a period; add it
__lowercase = [_add_missing_period(SCREAMING_SNAKE_CASE ) for line in nonempty_lines]
# gather article lines
__lowercase = []
__lowercase = deque(SCREAMING_SNAKE_CASE )
while True:
try:
__lowercase = lines.popleft()
if element.startswith('@highlight' ):
break
story_lines.append(SCREAMING_SNAKE_CASE )
except IndexError:
            # if "@highlight" is absent from the file we pop every element
            # until the deque is empty, and the resulting IndexError is caught here.
return story_lines, []
# gather summary lines
__lowercase = list(filter(lambda SCREAMING_SNAKE_CASE : not t.startswith('@highlight' ) , SCREAMING_SNAKE_CASE ) )
return story_lines, summary_lines
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> str:
    __lowercase = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u201d', ')']
if line.startswith('@highlight' ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str ) -> Optional[Any]:
if len(SCREAMING_SNAKE_CASE ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(SCREAMING_SNAKE_CASE )) )
return sequence
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str ) -> List[Any]:
__lowercase = torch.ones_like(SCREAMING_SNAKE_CASE )
__lowercase = sequence == pad_token_id
__lowercase = 0
return mask
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
__lowercase = [tokenizer.encode(SCREAMING_SNAKE_CASE ) for line in story_lines]
__lowercase = [token for sentence in story_lines_token_ids for token in sentence]
__lowercase = [tokenizer.encode(SCREAMING_SNAKE_CASE ) for line in summary_lines]
__lowercase = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
__lowercase = []
for sequence in batch:
__lowercase = -1
__lowercase = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(SCREAMING_SNAKE_CASE )
return torch.tensor(SCREAMING_SNAKE_CASE )
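# A hedged end-to-end sketch (added for illustration). The helper names used
# below (process_story, encode_for_summarization, fit_to_block_size, build_mask)
# are assumed original names for the obfuscated functions above, not guaranteed.
if __name__ == "__main__":
    from transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    with open('example.story', encoding='utf-8') as source:  # hypothetical story file
        story_lines, summary_lines = process_story(source.read())
    story_ids, summary_ids = encode_for_summarization(story_lines, summary_lines, tokenizer)
    story_ids = fit_to_block_size(story_ids, 512, tokenizer.pad_token_id)
    mask = build_mask(torch.tensor([story_ids]), tokenizer.pad_token_id)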
| 688 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
SCREAMING_SNAKE_CASE__ = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""jukebox""": 512,
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
lowerCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ : Optional[Any] = PRETRAINED_LYRIC_TOKENS_SIZES
lowerCAmelCase__ : Any = ["input_ids", "attention_mask"]
def __init__( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=["v3", "v2", "v2"] , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Union[str, Any]="<|endoftext|>" , **_UpperCAmelCase : Tuple , ) -> List[Any]:
"""simple docstring"""
__lowercase = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
super().__init__(
unk_token=_UpperCAmelCase , n_genres=_UpperCAmelCase , version=_UpperCAmelCase , max_n_lyric_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = version
__lowercase = max_n_lyric_tokens
__lowercase = n_genres
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
__lowercase = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
        # In v2 the lyric vocabulary has n_vocab=80 characters; v3 dropped "+", leaving n_vocab=79.
if len(self.lyrics_encoder ) == 79:
__lowercase = oov.replace(R'\-\'' , R'\-+\'' )
__lowercase = regex.compile(_UpperCAmelCase )
__lowercase = {v: k for k, v in self.artists_encoder.items()}
__lowercase = {v: k for k, v in self.genres_encoder.items()}
__lowercase = {v: k for k, v in self.lyrics_encoder.items()}
@property
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
        # dict() accepts at most one positional mapping, so merge the three vocabularies explicitly
        return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = [self.artists_encoder.get(_UpperCAmelCase , 0 ) for artist in list_artists]
for genres in range(len(_UpperCAmelCase ) ):
__lowercase = [self.genres_encoder.get(_UpperCAmelCase , 0 ) for genre in list_genres[genres]]
__lowercase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
__lowercase = [[self.lyrics_encoder.get(_UpperCAmelCase , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def a__ ( self : str , _UpperCAmelCase : str ) -> Tuple:
"""simple docstring"""
return list(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase , __lowercase = self.prepare_for_tokenization(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = self._tokenize(_UpperCAmelCase )
return artist, genre, lyrics
def a__ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : bool = False ) -> Tuple[str, str, str, Dict[str, Any]]:
"""simple docstring"""
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
__lowercase = artists[idx].lower()
__lowercase = [genres[idx].lower()]
else:
__lowercase = self._normalize(artists[idx] ) + '.v2'
__lowercase = [
self._normalize(_UpperCAmelCase ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
__lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
__lowercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
__lowercase = {vocab[index]: index + 1 for index in range(len(_UpperCAmelCase ) )}
__lowercase = 0
__lowercase = len(_UpperCAmelCase ) + 1
__lowercase = self.vocab
__lowercase = {v: k for k, v in self.vocab.items()}
__lowercase = ''
else:
__lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
__lowercase = self._run_strip_accents(_UpperCAmelCase )
__lowercase = lyrics.replace('\\' , '\n' )
__lowercase = self.out_of_vocab.sub('' , _UpperCAmelCase ), [], []
return artists, genres, lyrics
def a__ ( self : Tuple , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = unicodedata.normalize('NFD' , _UpperCAmelCase )
__lowercase = []
for char in text:
__lowercase = unicodedata.category(_UpperCAmelCase )
if cat == "Mn":
continue
output.append(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : str ) -> str:
"""simple docstring"""
__lowercase = (
[chr(_UpperCAmelCase ) for i in range(ord('a' ) , ord('z' ) + 1 )]
+ [chr(_UpperCAmelCase ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
+ [chr(_UpperCAmelCase ) for i in range(ord('0' ) , ord('9' ) + 1 )]
+ ['.']
)
__lowercase = frozenset(_UpperCAmelCase )
__lowercase = re.compile(R'_+' )
__lowercase = ''.join([c if c in accepted else '_' for c in text.lower()] )
__lowercase = pattern.sub('_' , _UpperCAmelCase ).strip('_' )
return text
def a__ ( self : List[str] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
return " ".join(_UpperCAmelCase )
def a__ ( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = False ) -> int:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = TensorType(_UpperCAmelCase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
__lowercase = tf.constant
__lowercase = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
__lowercase = torch.tensor
__lowercase = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
__lowercase = jnp.array
__lowercase = _is_jax
else:
__lowercase = np.asarray
__lowercase = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
__lowercase = [inputs]
if not is_tensor(_UpperCAmelCase ):
__lowercase = as_tensor(_UpperCAmelCase )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : int="" , _UpperCAmelCase : Tuple="pt" ) -> BatchEncoding:
"""simple docstring"""
__lowercase = [0, 0, 0]
__lowercase = [artist] * len(self.version )
__lowercase = [genres] * len(self.version )
__lowercase , __lowercase , __lowercase = self.tokenize(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase , __lowercase , __lowercase = self._convert_token_to_id(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = [-INFINITY] * len(full_tokens[-1] )
__lowercase = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=_UpperCAmelCase )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def a__ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=_UpperCAmelCase ) )
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=_UpperCAmelCase ) )
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=_UpperCAmelCase ) )
return (artists_file, genres_file, lyrics_file)
def a__ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.artists_decoder.get(_UpperCAmelCase )
__lowercase = [self.genres_decoder.get(_UpperCAmelCase ) for genre in genres_index]
__lowercase = [self.lyrics_decoder.get(_UpperCAmelCase ) for character in lyric_index]
return artist, genres, lyrics
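# A hedged usage sketch (added for illustration; JukeboxTokenizer and the
# checkpoint name are assumptions for the obfuscated class above): the tokenizer
# maps an (artist, genres, lyrics) triplet to one input_ids tensor per entry in
# tokenizer.version.
if __name__ == "__main__":
    tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
    encoding = tokenizer('Alan Jackson', 'Country Rock', 'old town road')
    print(len(encoding['input_ids']))  # one tensor per model version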
| 688 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = ["image_processor", "tokenizer"]
lowerCAmelCase__ : Union[str, Any] = "LayoutLMv2ImageProcessor"
lowerCAmelCase__ : Union[str, Any] = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self : Optional[Any] , _UpperCAmelCase : Any=None , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
__lowercase = kwargs.pop('feature_extractor' )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , _UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , _UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : Dict , ) -> BatchEncoding:
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
# first, apply the image processor
__lowercase = self.image_processor(images=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [text] # add batch dimension (as the image processor always adds a batch dimension)
__lowercase = features['words']
__lowercase = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
# add pixel values
__lowercase = features.pop('pixel_values' )
if return_overflowing_tokens is True:
__lowercase = self.get_overflowing_images(_UpperCAmelCase , encoded_inputs['overflow_to_sample_mapping'] )
__lowercase = images
return encoded_inputs
def a__ ( self : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
__lowercase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
f""" {len(_UpperCAmelCase )} and {len(_UpperCAmelCase )}""" )
return images_with_overflow
def a__ ( self : Dict , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : Optional[Any] , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def a__ ( self : str ) -> Dict:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , )
return self.image_processor_class
@property
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , )
return self.image_processor
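# A hedged usage sketch (added for illustration; LayoutXLMProcessor and the
# checkpoint name are assumptions for the obfuscated class above). With apply_ocr
# enabled in the image processor, only the image is passed and the words and
# boxes come from OCR.
if __name__ == "__main__":
    from PIL import Image
    from transformers import LayoutXLMProcessor

    processor = LayoutXLMProcessor.from_pretrained('microsoft/layoutxlm-base')
    image = Image.open('document.png').convert('RGB')  # hypothetical input image
    encoding = processor(image, return_tensors='pt')
    print(encoding.keys())  # input_ids, bbox, attention_mask, image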
| 688 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=13 , _UpperCAmelCase : Any=7 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : List[Any]=36 , _UpperCAmelCase : Optional[Any]=6 , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : Any=6 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Dict=5_12 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Any=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = embedding_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_hidden_groups
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = AlbertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ) -> Tuple:
"""simple docstring"""
__lowercase = AlbertForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , sentence_order_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def a__ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AlbertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = AlbertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = AlbertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
        (
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
        ) = config_and_inputs
__lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : int = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : Dict = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : Optional[Any] = True
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : int=False ) -> Tuple:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase )
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = AlbertModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
@slow
def a__ ( self : int ) -> Any:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AlbertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = AlbertModel.from_pretrained('albert-base-v2' )
__lowercase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
__lowercase = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , _UpperCAmelCase )
__lowercase = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) )
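# A short note on the check above (added for illustration): integration tests pin
# a small 3x3 window of the hidden states rather than the full tensor, so the
# expected values stay readable while atol=1e-4 absorbs backend-level numerical
# noise. The equivalent standalone check, assuming the same output tensor:
#
#     expected_slice = torch.tensor(
#         [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
#     )
#     assert torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)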
| 688 | 1 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class A__ ( lowerCAmelCase__ ):
def __init__( self : int , _UpperCAmelCase : Any=0.01 , _UpperCAmelCase : Optional[Any]=10_00 ) -> Dict:
"""simple docstring"""
__lowercase = p_stop
__lowercase = max_length
def __iter__( self : Dict ) -> int:
"""simple docstring"""
__lowercase = 0
__lowercase = False
while not stop and count < self.max_length:
yield count
count += 1
__lowercase = random.random() < self.p_stop
class A__ ( unittest.TestCase ):
def a__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : Union[str, Any]=True ) -> Optional[Any]:
"""simple docstring"""
__lowercase = [
BatchSamplerShard(_UpperCAmelCase , 2 , _UpperCAmelCase , split_batches=_UpperCAmelCase , even_batches=_UpperCAmelCase )
for i in range(2 )
]
__lowercase = [list(_UpperCAmelCase ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(_UpperCAmelCase ) for shard in batch_sampler_shards] , [len(_UpperCAmelCase ) for e in expected] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = BatchSampler(range(24 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = BatchSampler(range(24 ) , batch_size=3 , drop_last=_UpperCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__lowercase = BatchSampler(range(21 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = BatchSampler(range(21 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple
        # of num_processes batches.
__lowercase = BatchSampler(range(22 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = BatchSampler(range(22 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size and does not have
        # a multiple of num_processes batches.
__lowercase = BatchSampler(range(20 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = BatchSampler(range(20 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase )
# Check the shards when the dataset is very small.
__lowercase = BatchSampler(range(2 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = BatchSampler(range(2 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [[], []]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
__lowercase = BatchSampler(range(24 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(24 ) , batch_size=4 , drop_last=_UpperCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size.
__lowercase = BatchSampler(range(22 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(22 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__lowercase = BatchSampler(range(21 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(21 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase )
# Check the shards when the dataset is very small.
__lowercase = BatchSampler(range(2 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(2 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [[], []]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase )
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
__lowercase = BatchSampler(range(24 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , even_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(24 ) , batch_size=3 , drop_last=_UpperCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , even_batches=_UpperCAmelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__lowercase = BatchSampler(range(21 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , even_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(21 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , even_batches=_UpperCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple
        # of num_processes batches.
__lowercase = BatchSampler(range(22 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , even_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(22 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , even_batches=_UpperCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size and does not have
        # a multiple of num_processes batches.
__lowercase = BatchSampler(range(20 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , even_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(20 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , even_batches=_UpperCAmelCase )
# Check the shards when the dataset is very small.
__lowercase = BatchSampler(range(2 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [[[0, 1]], []]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , even_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(2 ) , batch_size=3 , drop_last=_UpperCAmelCase )
__lowercase = [[], []]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , even_batches=_UpperCAmelCase )
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
__lowercase = BatchSampler(range(24 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase , even_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(24 ) , batch_size=4 , drop_last=_UpperCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase , even_batches=_UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size.
__lowercase = BatchSampler(range(22 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase , even_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(22 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase , even_batches=_UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__lowercase = BatchSampler(range(21 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase , even_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(21 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase , even_batches=_UpperCAmelCase )
# Check the shards when the dataset is very small.
__lowercase = BatchSampler(range(2 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [[[0, 1]], []]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase , even_batches=_UpperCAmelCase )
__lowercase = BatchSampler(range(2 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = [[], []]
self.check_batch_sampler_shards(_UpperCAmelCase , _UpperCAmelCase , split_batches=_UpperCAmelCase , even_batches=_UpperCAmelCase )
def a__ ( self : str ) -> Dict:
"""simple docstring"""
__lowercase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
__lowercase = [BatchSamplerShard(_UpperCAmelCase , 2 , _UpperCAmelCase , even_batches=_UpperCAmelCase ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def a__ ( self : str , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Dict=False ) -> Any:
"""simple docstring"""
random.seed(_UpperCAmelCase )
__lowercase = list(_UpperCAmelCase )
__lowercase = [
IterableDatasetShard(
_UpperCAmelCase , batch_size=_UpperCAmelCase , drop_last=_UpperCAmelCase , num_processes=_UpperCAmelCase , process_index=_UpperCAmelCase , split_batches=_UpperCAmelCase , )
for i in range(_UpperCAmelCase )
]
__lowercase = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(_UpperCAmelCase )
iterable_dataset_lists.append(list(_UpperCAmelCase ) )
__lowercase = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
__lowercase = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
self.assertTrue(len(_UpperCAmelCase ) % shard_batch_size == 0 )
__lowercase = []
for idx in range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(_UpperCAmelCase ) < len(_UpperCAmelCase ):
reference += reference
self.assertListEqual(_UpperCAmelCase , reference[: len(_UpperCAmelCase )] )
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = 42
__lowercase = RandomIterableDataset()
self.check_iterable_dataset_shards(_UpperCAmelCase , _UpperCAmelCase , batch_size=4 , drop_last=_UpperCAmelCase , split_batches=_UpperCAmelCase )
self.check_iterable_dataset_shards(_UpperCAmelCase , _UpperCAmelCase , batch_size=4 , drop_last=_UpperCAmelCase , split_batches=_UpperCAmelCase )
self.check_iterable_dataset_shards(_UpperCAmelCase , _UpperCAmelCase , batch_size=4 , drop_last=_UpperCAmelCase , split_batches=_UpperCAmelCase )
self.check_iterable_dataset_shards(_UpperCAmelCase , _UpperCAmelCase , batch_size=4 , drop_last=_UpperCAmelCase , split_batches=_UpperCAmelCase )
# Edge case with a very small dataset
__lowercase = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(_UpperCAmelCase , _UpperCAmelCase , batch_size=4 , drop_last=_UpperCAmelCase , split_batches=_UpperCAmelCase )
self.check_iterable_dataset_shards(_UpperCAmelCase , _UpperCAmelCase , batch_size=4 , drop_last=_UpperCAmelCase , split_batches=_UpperCAmelCase )
self.check_iterable_dataset_shards(_UpperCAmelCase , _UpperCAmelCase , batch_size=4 , drop_last=_UpperCAmelCase , split_batches=_UpperCAmelCase )
self.check_iterable_dataset_shards(_UpperCAmelCase , _UpperCAmelCase , batch_size=4 , drop_last=_UpperCAmelCase , split_batches=_UpperCAmelCase )
def a__ ( self : int ) -> Optional[int]:
"""simple docstring"""
__lowercase = BatchSampler(range(16 ) , batch_size=4 , drop_last=_UpperCAmelCase )
__lowercase = SkipBatchSampler(_UpperCAmelCase , 2 )
self.assertListEqual(list(_UpperCAmelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = DataLoader(list(range(16 ) ) , batch_size=4 )
__lowercase = skip_first_batches(_UpperCAmelCase , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(_UpperCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(_UpperCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def a__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
Accelerator()
__lowercase = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(_UpperCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(_UpperCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
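# An illustration (added; not part of the original tests) of the sharding pattern
# the expectations above encode: with 2 processes and split_batches=False, shard i
# yields batches i, i + 2, i + 4, ... of the wrapped sampler, and even_batches=True
# wraps around to pad shards that come up short.
if __name__ == "__main__":
    sampler = BatchSampler(range(12), batch_size=3, drop_last=False)
    shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]
    print(list(shards[0]))  # [[0, 1, 2], [6, 7, 8]]
    print(list(shards[1]))  # [[3, 4, 5], [9, 10, 11]]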
| 688 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class A__ ( lowerCAmelCase__ ):
def __init__( self : List[str] , _UpperCAmelCase : str = "▁" , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[str, AddedToken] = "<unk>" , _UpperCAmelCase : Union[str, AddedToken] = "</s>" , _UpperCAmelCase : Union[str, AddedToken] = "<pad>" , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
__lowercase = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
__lowercase = token_dict['token']
__lowercase = Tokenizer(Unigram() )
__lowercase = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}' ) , ' ' ),
normalizers.Lowercase(),
] )
__lowercase = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ),
pre_tokenizers.Digits(individual_digits=_UpperCAmelCase ),
pre_tokenizers.Punctuation(),
] )
__lowercase = decoders.Metaspace(replacement=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__lowercase = TemplateProcessing(
single=f"""$A {self.special_tokens["eos"]["token"]}""" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
__lowercase = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : Union[str, List[str]] , _UpperCAmelCase : int = 80_00 , _UpperCAmelCase : bool = True , ) -> str:
"""simple docstring"""
__lowercase = trainers.UnigramTrainer(
vocab_size=_UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=_UpperCAmelCase , )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [files]
self._tokenizer.train(_UpperCAmelCase , trainer=_UpperCAmelCase )
self.add_unk_id()
def a__ ( self : int , _UpperCAmelCase : Union[Iterator[str], Iterator[Iterator[str]]] , _UpperCAmelCase : int = 80_00 , _UpperCAmelCase : bool = True , ) -> Dict:
"""simple docstring"""
__lowercase = trainers.UnigramTrainer(
vocab_size=_UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=_UpperCAmelCase , )
self._tokenizer.train_from_iterator(_UpperCAmelCase , trainer=_UpperCAmelCase )
self.add_unk_id()
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = json.loads(self._tokenizer.to_str() )
__lowercase = self.special_tokens['unk']['id']
__lowercase = Tokenizer.from_str(json.dumps(_UpperCAmelCase ) )
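# A hedged usage sketch (added for illustration; SentencePieceUnigramTokenizer is
# the assumed original name of the class above): train on an in-memory iterator,
# then encode.
if __name__ == "__main__":
    tokenizer = SentencePieceUnigramTokenizer()
    tokenizer.train_from_iterator(['Hello world!', 'How are you?'], 100)
    print(tokenizer.encode('hello world').tokens)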
| 688 | 1 |
import enum
import shutil
import sys
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = shutil.get_terminal_size()
SCREAMING_SNAKE_CASE__ = {"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""}
class A__ ( enum.Enum ):
lowerCAmelCase__ : int = 0
lowerCAmelCase__ : Union[str, Any] = 1
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any]="" ) -> Union[str, Any]:
sys.stdout.write(str(SCREAMING_SNAKE_CASE ) + end )
sys.stdout.flush()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : str="" ) -> str:
forceWrite(F"""\u001b[{color}m{content}\u001b[0m""" , SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( ) -> List[str]:
forceWrite('\r' )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str ) -> List[str]:
forceWrite(F"""\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}""" )
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
forceWrite(' ' * TERMINAL_WIDTH )
reset_cursor()
def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
reset_cursor()
forceWrite('-' * TERMINAL_WIDTH )
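# A hedged usage sketch (added for illustration; forceWrite, reset_cursor,
# clear_line and writeColor are assumed original names of the helpers above):
# redraw a single status line in place.
if __name__ == "__main__":
    forceWrite('working...')
    reset_cursor()
    clear_line()
    writeColor('done', 32)  # 32 is the ANSI code for green
    forceWrite('\n')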
| 688 |
import string
from math import logaa
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> int:
__lowercase = document.translate(
str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' )
__lowercase = document_without_punctuation.split(' ' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> tuple[int, int]:
__lowercase = corpus.lower().translate(
str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with ''
__lowercase = corpus_without_punctuation.split('\n' )
__lowercase = term.lower()
return (len([doc for doc in docs if term in doc] ), len(SCREAMING_SNAKE_CASE ))
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str]=False ) -> float:
if smoothing:
if n == 0:
raise ValueError('log10(0) is undefined.' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('df must be > 0' )
elif n == 0:
raise ValueError('log10(0) is undefined.' )
return round(logaa(n / df ) , 3 )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> float:
return round(tf * idf , 3 )
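# A worked example (added for illustration; term_frequency, document_frequency,
# inverse_document_frequency and tf_idf are assumed original names, and keyword
# arguments are used to sidestep the obfuscated positional order above).
if __name__ == "__main__":
    corpus = 'the cat sat\nthe dog sat\nthe cat ran'
    tf = term_frequency(term='cat', document='the cat sat')  # 1
    df, n = document_frequency(term='cat', corpus=corpus)  # (2, 3)
    idf = inverse_document_frequency(df=df, n=n)  # round(log10(3 / 2), 3) == 0.176
    print(tf_idf(tf=tf, idf=idf))  # 0.176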
| 688 | 1 |
from math import asin, atan, cos, radians, sin, sqrt, tan

# WGS84 ellipsoid constants
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # reduce geodetic latitudes to the auxiliary sphere, then apply the haversine formula
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
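    # Illustrative call (coordinates are hypothetical, not from the original
    # file): roughly the San Francisco -> Yosemite Valley distance in metres.
    print(haversine_distance(37.774856, -122.424227, 37.864742, -119.537521))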
| 688 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# TODO: upload to AWS
SCREAMING_SNAKE_CASE__ = {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"""
),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
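

# Quick sanity check (illustrative, not part of the original file): the defaults
# mirror BERT-base, plus a 128-d projection head optionally shared by both encoders.
# config = RetriBertConfig(projection_dim=256, share_encoders=False)
# assert config.num_hidden_layers == 8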
| 688 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
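

# Derived-shape check (illustrative, not part of the original file): with the
# defaults above, the last of the four stages has 64 * 2**3 = 512 channels.
# config = DinatConfig()
# assert config.hidden_size == 512
# assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]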
| 688 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
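
# Note (illustrative): with this lazy pattern, importing the package stays cheap;
# `modeling_falcon` (and its torch dependency) is only imported the first time an
# attribute such as `FalconModel` is actually accessed.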
| 688 | 1 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyVaaPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaPriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return 32
@property
def a__ ( self : Optional[int] ) -> str:
"""simple docstring"""
return 32
@property
def a__ ( self : str ) -> Optional[int]:
"""simple docstring"""
return self.time_input_dim
@property
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return self.time_input_dim * 4
@property
def a__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
return 1_00
@property
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def a__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(_UpperCAmelCase )
@property
def a__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase = {
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
__lowercase = PriorTransformer(**_UpperCAmelCase )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
__lowercase = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_24 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
__lowercase = CLIPVisionModelWithProjection(_UpperCAmelCase )
return model
@property
def a__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=_UpperCAmelCase , do_normalize=_UpperCAmelCase , do_resize=_UpperCAmelCase , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=2_24 , )
return image_processor
def a__ ( self : int ) -> Dict:
"""simple docstring"""
__lowercase = self.dummy_prior
__lowercase = self.dummy_image_encoder
__lowercase = self.dummy_text_encoder
__lowercase = self.dummy_tokenizer
__lowercase = self.dummy_image_processor
__lowercase = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=10_00 , clip_sample=_UpperCAmelCase , clip_sample_range=10.0 , )
__lowercase = {
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def a__ ( self : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict=0 ) -> int:
"""simple docstring"""
if str(_UpperCAmelCase ).startswith('mps' ):
__lowercase = torch.manual_seed(_UpperCAmelCase )
else:
__lowercase = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
__lowercase = {
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = 'cpu'
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**_UpperCAmelCase )
__lowercase = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__lowercase = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
__lowercase = output.image_embeds
__lowercase = pipe(
**self.get_dummy_inputs(_UpperCAmelCase ) , return_dict=_UpperCAmelCase , )[0]
__lowercase = image[0, -10:]
__lowercase = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
__lowercase = np.array(
[-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def a__ ( self : int ) -> str:
"""simple docstring"""
__lowercase = torch_device == 'cpu'
__lowercase = True
__lowercase = False
self._test_inference_batch_single_identical(
test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , test_mean_pixel_difference=_UpperCAmelCase , )
@skip_mps
def a__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase = torch_device == 'cpu'
__lowercase = False
self._test_attention_slicing_forward_pass(
test_max_difference=_UpperCAmelCase , test_mean_pixel_difference=_UpperCAmelCase , )
| 688 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify inputs
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case of overflow, map each `input_ids` sample back to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 688 | 1 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_mpi_enabled" is set.
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
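

# Illustrative environment (hypothetical values, not from the original file): with
# these set, the check above returns True iff `smdistributed` is importable.
#   SM_HP_MP_PARAMETERS='{"partitions": 2}'
#   SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}'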
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )
@cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 688 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
@property
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
if self._dummy_file is None:
__lowercase = self.download_dummy_data()
return self._dummy_file
@property
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__lowercase = cached_path(
_UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=_UpperCAmelCase , force_extract=_UpperCAmelCase )
return os.path.join(_UpperCAmelCase , self.dummy_file_name )
@property
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
if self._bucket_url is None:
__lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : List[str] , *_UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__lowercase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__lowercase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return self.create_dummy_data_dict(_UpperCAmelCase , _UpperCAmelCase )
elif isinstance(_UpperCAmelCase , (list, tuple) ):
return self.create_dummy_data_list(_UpperCAmelCase , _UpperCAmelCase )
else:
return self.create_dummy_data_single(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Optional[int] , _UpperCAmelCase : Tuple , *_UpperCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def a__ ( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , *_UpperCAmelCase : str , **_UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
return path
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return {}
def a__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
for single_url in single_urls:
download_callback(_UpperCAmelCase )
else:
__lowercase = single_urls
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) ) for x in single_urls]
else:
__lowercase = single_urls
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) )
__lowercase = value
# make sure that values are unique
if all(isinstance(_UpperCAmelCase , _UpperCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__lowercase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__lowercase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , _UpperCAmelCase ) ) for url in data_url )
__lowercase = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__lowercase = [data_url[0]] * len(_UpperCAmelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(_UpperCAmelCase )
return dummy_data_list
def a__ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(_UpperCAmelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
pass
def a__ ( self : int ) -> str:
"""simple docstring"""
pass
def a__ ( self : Optional[int] , _UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
def _iter_archive_members(_UpperCAmelCase : Optional[Any] ):
# this preserves the order of the members inside the ZIP archive
__lowercase = Path(self.dummy_file ).parent
__lowercase = path.relative_to(_UpperCAmelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__lowercase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_UpperCAmelCase )
__lowercase = Path(_UpperCAmelCase )
__lowercase = _iter_archive_members(_UpperCAmelCase ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(_UpperCAmelCase ).as_posix(), file_path.open('rb' )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [paths]
for path in paths:
if os.path.isfile(_UpperCAmelCase ):
if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_UpperCAmelCase ):
if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(_UpperCAmelCase ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(_UpperCAmelCase , _UpperCAmelCase )
| 688 | 1 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
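

# Standalone illustration (mirrors the assertions above): with stage names
# ["a", "b", "c"] and neither argument given, alignment defaults to the last stage:
# get_aligned_output_features_output_indices(None, None, ["a", "b", "c"]) -> (["c"], [2])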
| 688 |
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd kernel size
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
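

# Quick illustration (not part of the original script): in a 3x3 spatial kernel
# the centre cell has distance 0 and therefore the largest Gaussian weight.
def _demo_kernel() -> None:
    kernel = get_gauss_kernel(3, 1.0)
    assert kernel[1, 1] == kernel.max()
    print(kernel)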
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| 688 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)
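

# Illustrative invocation (hypothetical flags, not from the original file):
#   accelerate config default --mixed_precision fp16
# is roughly equivalent to:
#   parser = get_config_parser()
#   args = parser.parse_args(["default", "--mixed_precision", "fp16"])
#   args.func(args)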
if __name__ == "__main__":
main()
| 688 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , _UpperCAmelCase )
self.assertIsInstance(encoding.boxes , _UpperCAmelCase )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
    def test_call_numpy(self):
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
    def test_call_pytorch(self):
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
    def test_LayoutLMv3_integration_test(self):
"""simple docstring"""
__lowercase = LayoutLMvaImageProcessor()
from datasets import load_dataset
__lowercase = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__lowercase = Image.open(ds[0]['file'] ).convert('RGB' )
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__lowercase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__lowercase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _UpperCAmelCase )
self.assertListEqual(encoding.boxes , _UpperCAmelCase )
# with apply_OCR = False
__lowercase = LayoutLMvaImageProcessor(apply_ocr=_UpperCAmelCase )
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 688 | 1 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
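

# Worked example of the renaming logic above (illustrative): a Flax parameter at
# ("down_blocks_0", "attentions_1", "proj_in", "kernel") with a 2-D kernel becomes
# the PyTorch key "down_blocks.0.attentions.1.proj_in.weight" with the kernel
# transposed; 4-D conv kernels are instead permuted with (3, 2, 0, 1), and
# "scale" parameters map to "weight" unchanged.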
| 688 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class A__ ( lowerCAmelCase__ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def a__ ( self : str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__lowercase = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
__lowercase = 'past_encoder_sequence + sequence'
__lowercase = {0: 'batch'}
__lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase = {0: 'batch', 1: 'decoder_sequence'}
__lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
return 13
@property
def a__ ( self : Dict ) -> float:
"""simple docstring"""
return 5e-4
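# Minimal standalone sketch of the `feed_forward_proj` parsing convention used
# in the config above (the variable names here are assumptions):
for feed_forward_proj in ("gated-gelu", "relu"):
    act_info = feed_forward_proj.split("-")
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == "gated"
    print(feed_forward_proj, dense_act_fn, is_gated_act)
# gated-gelu gelu True
# relu relu False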
| 688 | 1 |
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> str:
if number > 0:
raise ValueError('input must be a negative integer' )
__lowercase = len(bin(SCREAMING_SNAKE_CASE )[3:] )
__lowercase = bin(abs(SCREAMING_SNAKE_CASE ) - (1 << binary_number_length) )[3:]
__lowercase = (
(
'1'
+ '0' * (binary_number_length - len(SCREAMING_SNAKE_CASE ))
+ twos_complement_number
)
if number < 0
else '0'
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
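# Worked values for the helper above (negative inputs only; 0 maps to "0b0";
# `f` is a placeholder for the obfuscated function name):
# f(-1)  -> "0b11"
# f(-5)  -> "0b1011"
# f(-17) -> "0b101111"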
| 688 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[Any] = "layoutlmv3"
def __init__( self : Optional[Any] , _UpperCAmelCase : int=5_02_65 , _UpperCAmelCase : Union[str, Any]=7_68 , _UpperCAmelCase : str=12 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : List[str]=30_72 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : Optional[Any]=0 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : Optional[Any]=1_28 , _UpperCAmelCase : Tuple=1_28 , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : Dict=1_28 , _UpperCAmelCase : int=64 , _UpperCAmelCase : List[str]=2_56 , _UpperCAmelCase : int=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=2_24 , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
vocab_size=_UpperCAmelCase , hidden_size=_UpperCAmelCase , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , intermediate_size=_UpperCAmelCase , hidden_act=_UpperCAmelCase , hidden_dropout_prob=_UpperCAmelCase , attention_probs_dropout_prob=_UpperCAmelCase , max_position_embeddings=_UpperCAmelCase , type_vocab_size=_UpperCAmelCase , initializer_range=_UpperCAmelCase , layer_norm_eps=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = max_ad_position_embeddings
__lowercase = coordinate_size
__lowercase = shape_size
__lowercase = has_relative_attention_bias
__lowercase = rel_pos_bins
__lowercase = max_rel_pos
__lowercase = has_spatial_attention_bias
__lowercase = rel_ad_pos_bins
__lowercase = max_rel_ad_pos
__lowercase = text_embed
__lowercase = visual_embed
__lowercase = input_size
__lowercase = num_channels
__lowercase = patch_size
__lowercase = classifier_dropout
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[str] = version.parse("1.12" )
@property
def a__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def a__ ( self : Any ) -> float:
"""simple docstring"""
return 1e-5
@property
def a__ ( self : Dict ) -> int:
"""simple docstring"""
return 12
def a__ ( self : Tuple , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional["TensorType"] = None , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , 'apply_ocr' , _UpperCAmelCase )
        # If the axis is dynamic (-1), forward a fixed batch of 2 samples to avoid ONNX-specific optimizations
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If the axis is dynamic (-1), forward a fixed sequence of 8 tokens to avoid ONNX-specific optimizations
__lowercase = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase )
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase )
        # Generate dummy text inputs for the computed batch and sequence lengths
__lowercase = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = dict(
processor(
_UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) )
return inputs
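# Hedged sketch of `compute_effective_axis_dimension` as used above (a
# reimplementation inferred from its call sites, not the library source):
# a dynamic axis (-1) falls back to the fixed default, and any special tokens
# the tokenizer will add are subtracted from the sequence axis.
def effective_axis_sketch(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    if dimension <= 0:
        dimension = fixed_dimension
    return dimension - num_token_to_add
# effective_axis_sketch(-1, 8, num_token_to_add=2) -> 6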
| 688 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : str = StableDiffusionPanoramaPipeline
lowerCAmelCase__ : Union[str, Any] = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase__ : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase__ : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase__ : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def a__ ( self : Any ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
__lowercase = DDIMScheduler()
torch.manual_seed(0 )
__lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__lowercase = CLIPTextModel(_UpperCAmelCase )
__lowercase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__lowercase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def a__ ( self : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple=0 ) -> Optional[Any]:
"""simple docstring"""
__lowercase = torch.manual_seed(_UpperCAmelCase )
__lowercase = {
'prompt': 'a photo of the dolomites',
'generator': generator,
# Setting height and width to None to prevent OOMs on CPU.
'height': None,
'width': None,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
__lowercase = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__lowercase = self.get_dummy_inputs(_UpperCAmelCase )
__lowercase = sd_pipe(**_UpperCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowercase = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
__lowercase = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__lowercase = self.get_dummy_inputs(_UpperCAmelCase )
__lowercase = 'french fries'
__lowercase = sd_pipe(**_UpperCAmelCase , negative_prompt=_UpperCAmelCase )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowercase = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self : str ) -> int:
"""simple docstring"""
__lowercase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
__lowercase = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__lowercase = self.get_dummy_inputs(_UpperCAmelCase )
__lowercase = sd_pipe(**_UpperCAmelCase , view_batch_size=2 )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowercase = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' )
__lowercase = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
__lowercase = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__lowercase = self.get_dummy_inputs(_UpperCAmelCase )
__lowercase = sd_pipe(**_UpperCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowercase = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = PNDMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , skip_prk_steps=_UpperCAmelCase )
__lowercase = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
__lowercase = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__lowercase = self.get_dummy_inputs(_UpperCAmelCase )
__lowercase = sd_pipe(**_UpperCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowercase = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def a__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : List[Any] , _UpperCAmelCase : str=0 ) -> Tuple:
"""simple docstring"""
__lowercase = torch.manual_seed(_UpperCAmelCase )
__lowercase = {
'prompt': 'a photo of the dolomites',
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def a__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase = 'stabilityai/stable-diffusion-2-base'
__lowercase = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='scheduler' )
__lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
__lowercase = self.get_inputs()
__lowercase = pipe(**_UpperCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
__lowercase = np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase = StableDiffusionPanoramaPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-base' , safety_checker=_UpperCAmelCase )
__lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
__lowercase = self.get_inputs()
__lowercase = pipe(**_UpperCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
__lowercase = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase = 0
def callback_fn(_UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : torch.FloatTensor ) -> None:
__lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__lowercase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
__lowercase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
__lowercase = False
__lowercase = 'stabilityai/stable-diffusion-2-base'
__lowercase = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='scheduler' )
__lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
__lowercase = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
__lowercase = self.get_inputs()
pipe(**_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def a__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowercase = 'stabilityai/stable-diffusion-2-base'
__lowercase = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='scheduler' )
__lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
__lowercase = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowercase = self.get_inputs()
__lowercase = pipe(**_UpperCAmelCase )
__lowercase = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 688 |
from pathlib import Path
import numpy as np
from PIL import Image
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray ) -> np.ndarray:
__lowercase , __lowercase , __lowercase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2_989 * r + 0.5_870 * g + 0.1_140 * b
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray ) -> np.ndarray:
return (gray > 127) & (gray <= 255)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : np.ndarray ) -> np.ndarray:
__lowercase = np.zeros_like(SCREAMING_SNAKE_CASE )
__lowercase = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
__lowercase = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
__lowercase = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
__lowercase = int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
SCREAMING_SNAKE_CASE__ = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
SCREAMING_SNAKE_CASE__ = np.array(Image.open(lena_path))
# kernel to be applied
SCREAMING_SNAKE_CASE__ = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
SCREAMING_SNAKE_CASE__ = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
SCREAMING_SNAKE_CASE__ = Image.fromarray(output).convert("""RGB""")
pil_img.save("""result_dilation.png""")
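# Toy check of the dilation above (standalone, assumed values): a single
# centre pixel grows into the shape of the cross structuring element.
toy_image = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
toy_kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
# dilation(toy_image, toy_kernel) -> [[0 1 0], [1 1 1], [0 1 0]]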
| 688 | 1 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small' )
__lowercase = AutoTokenizer.from_pretrained('google/mt5-small' )
__lowercase = tokenizer('Hello there' , return_tensors='np' ).input_ids
__lowercase = tokenizer('Hi I am' , return_tensors='np' ).input_ids
__lowercase = shift_tokens_right(_UpperCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
__lowercase = model(_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase ).logits
__lowercase = optax.softmax_cross_entropy(_UpperCAmelCase , onehot(_UpperCAmelCase , logits.shape[-1] ) ).mean()
__lowercase = -(labels.shape[-1] * loss.item())
__lowercase = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
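# Note on the scoring above (a reading of the math, not library documentation):
# `loss` is the mean per-token cross-entropy, so multiplying by the label
# length and negating yields a total sequence log-likelihood, which is the
# convention EXPECTED_SCORE was computed in.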
| 688 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[str] = ["pixel_values"]
def __init__( self : Tuple , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 2_55 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : bool = True , **_UpperCAmelCase : str , ) -> None:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
__lowercase = size if size is not None else {'height': 3_84, 'width': 3_84}
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__lowercase = do_resize
__lowercase = size
__lowercase = resample
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_normalize
__lowercase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__lowercase = image_std if image_std is not None else OPENAI_CLIP_STD
__lowercase = do_convert_rgb
def a__ ( self : int , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : int , ) -> np.ndarray:
"""simple docstring"""
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
__lowercase = (size['height'], size['width'])
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : Optional[int] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Any , ) -> str:
"""simple docstring"""
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[str] , ) -> np.ndarray:
"""simple docstring"""
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : int , _UpperCAmelCase : ImageInput , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Dict[str, int]] = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[float] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : int , ) -> PIL.Image.Image:
"""simple docstring"""
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = resample if resample is not None else self.resample
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = image_mean if image_mean is not None else self.image_mean
__lowercase = image_std if image_std is not None else self.image_std
__lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase = size if size is not None else self.size
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__lowercase = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase = [convert_to_rgb(_UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
__lowercase = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_rescale:
__lowercase = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
__lowercase = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
__lowercase = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
__lowercase = BatchFeature(data={'pixel_values': images} , tensor_type=_UpperCAmelCase )
return encoded_outputs
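# Standalone numpy sketch of the rescale -> normalize steps above; the
# mean/std values are the OPENAI_CLIP constants referenced by this processor:
import numpy as np

toy = np.full((3, 2, 2), 255, dtype=np.float32)  # all-white CHW toy image
rescaled = toy * (1 / 255)                        # the do_rescale step
clip_mean = np.array([0.48145466, 0.4578275, 0.40821073]).reshape(3, 1, 1)
clip_std = np.array([0.26862954, 0.26130258, 0.27577711]).reshape(3, 1, 1)
normalized = (rescaled - clip_mean) / clip_std    # the do_normalize step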
| 688 | 1 |
SCREAMING_SNAKE_CASE__ = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : int ) -> list[str]:
__lowercase = set()
# keep track of all the paths to be checked
__lowercase = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
__lowercase = queue.pop(0 )
# get the last node from the path
__lowercase = path[-1]
if node not in explored:
__lowercase = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
__lowercase = list(SCREAMING_SNAKE_CASE )
new_path.append(SCREAMING_SNAKE_CASE )
queue.append(SCREAMING_SNAKE_CASE )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(SCREAMING_SNAKE_CASE )
# in case there's no path between the 2 nodes
return []
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple ) -> int:
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
__lowercase = [start]
__lowercase = set(SCREAMING_SNAKE_CASE )
# Keep tab on distances from `start` node.
__lowercase = {start: 0, target: -1}
while queue:
__lowercase = queue.pop(0 )
if node == target:
__lowercase = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(SCREAMING_SNAKE_CASE )
queue.append(SCREAMING_SNAKE_CASE )
__lowercase = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, """G""", """D""")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, """G""", """D""")) # returns 4
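# A compact, self-contained restatement of the BFS shortest-path idea above,
# using collections.deque for O(1) pops (illustrative only; names assumed):
from collections import deque

def bfs_path_sketch(graph: dict, start: str, goal: str) -> list[str]:
    queue = deque([[start]])
    seen = {start}
    while queue:
        path = queue.popleft()
        node = path[-1]
        if node == goal:
            return path
        for neighbour in graph.get(node, []):
            if neighbour not in seen:
                seen.add(neighbour)
                queue.append(path + [neighbour])
    return []
# bfs_path_sketch(demo_graph, "G", "D") -> ['G', 'C', 'A', 'B', 'D']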
| 688 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
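# Behavioural note (assumed from the `_LazyModule` usage above): at import time
# only `_import_structure` is registered, and the heavy submodules (the torch,
# TF and Flax modeling files) are imported lazily on first attribute access.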
| 688 | 1 |
import pytest
import datasets
# Import fixture modules as plugins
SCREAMING_SNAKE_CASE__ = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple ) -> Optional[Any]:
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ['integration', 'unit'] ):
continue
item.add_marker(pytest.mark.unit )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Dict ) -> int:
    # test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why does a cache dir per test function not work?
__lowercase = tmp_path_factory.getbasetemp() / 'cache'
__lowercase = test_hf_cache_home / 'datasets'
__lowercase = test_hf_cache_home / 'metrics'
__lowercase = test_hf_cache_home / 'modules'
monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(SCREAMING_SNAKE_CASE ) )
monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(SCREAMING_SNAKE_CASE ) )
monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(SCREAMING_SNAKE_CASE ) )
__lowercase = test_hf_datasets_cache / 'downloads'
monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(SCREAMING_SNAKE_CASE ) )
__lowercase = test_hf_datasets_cache / 'downloads' / 'extracted'
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(SCREAMING_SNAKE_CASE ) )
@pytest.fixture(autouse=SCREAMING_SNAKE_CASE , scope='session' )
def __SCREAMING_SNAKE_CASE ( ) -> str:
datasets.disable_progress_bar()
@pytest.fixture(autouse=SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Tuple:
# don't take tests into account when counting downloads
monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , SCREAMING_SNAKE_CASE )
@pytest.fixture
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]:
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 is supported
monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , SCREAMING_SNAKE_CASE )
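# Illustration (hypothetical test file) of the collection hook above: a test
# already carrying @pytest.mark.integration keeps its marker, while an
# unmarked test is auto-tagged "unit", so `pytest -m unit` selects it.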
| 688 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict ) -> Any:
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
__lowercase = TapasConfig.from_json_file(SCREAMING_SNAKE_CASE )
# set absolute/relative position embeddings parameter
__lowercase = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "WTQ":
# run_task_main.py hparams
__lowercase = 4
__lowercase = True
# hparam_utils.py hparams
__lowercase = 0.664_694
__lowercase = 0.207_951
__lowercase = 0.121_194
__lowercase = True
__lowercase = True
__lowercase = False
__lowercase = 0.0_352_513
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
__lowercase = 4
__lowercase = False
# hparam_utils.py hparams
__lowercase = 36.4_519
__lowercase = 0.903_421
__lowercase = 222.088
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = 0.763_141
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "TABFACT":
__lowercase = TapasForSequenceClassification(config=SCREAMING_SNAKE_CASE )
elif task == "MLM":
__lowercase = TapasForMaskedLM(config=SCREAMING_SNAKE_CASE )
elif task == "INTERMEDIATE_PRETRAINING":
__lowercase = TapasModel(config=SCREAMING_SNAKE_CASE )
else:
raise ValueError(F"""Task {task} not supported.""" )
print(F"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model (weights and configuration)
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
# Save tokenizer files
print(F"""Save tokenizer files to {pytorch_dump_path}""" )
__lowercase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt' , model_max_length=512 )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
print('Used relative position embeddings:' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
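# Example invocation (the script name and all paths below are placeholders):
# python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#     --task WTQ \
#     --reset_position_index_per_cell \
#     --tf_checkpoint_path /path/to/model.ckpt \
#     --tapas_config_file /path/to/tapas_config.json \
#     --pytorch_dump_path /path/to/output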
| 688 | 1 |
from __future__ import annotations
import os
from typing import Any
import requests
SCREAMING_SNAKE_CASE__ = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
SCREAMING_SNAKE_CASE__ = BASE_URL + """/user"""
# https://github.com/settings/tokens
SCREAMING_SNAKE_CASE__ = os.environ.get("""USER_TOKEN""", """""")
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str ) -> dict[Any, Any]:
__lowercase = {
'Authorization': F"""token {auth_token}""",
'Accept': 'application/vnd.github.v3+json',
}
return requests.get(SCREAMING_SNAKE_CASE , headers=SCREAMING_SNAKE_CASE ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'''{key}: {value}''')
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
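# Example run (the token value and script name are placeholders; never
# hard-code real tokens):
# USER_TOKEN=ghp_xxx python fetch_github_info.py
# -> prints one "key: value" line per field of the authenticated user profile.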
| 688 |
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> int:
return int((input_a, input_a).count(1 ) != 0 )
def __SCREAMING_SNAKE_CASE ( ) -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 688 | 1 |
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Tuple:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any ) -> int:
__lowercase = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
__lowercase = key.replace('heads.cmd.mim_head.cls.predictions' , 'mmm_image_head' )
__lowercase = key.replace('heads.cmd.mlm_head.cls.predictions' , 'mmm_text_head' )
__lowercase = key.replace('heads.cmd.itm_head.cls' , 'itm_head' )
__lowercase = key.replace('heads.cmd.itm_head.pooler' , 'itm_head.pooler' )
__lowercase = key.replace('heads.cmd.clip_head.logit_scale' , 'flava.logit_scale' )
__lowercase = key.replace('heads.fairseq_mlm.cls.predictions' , 'mlm_head' )
__lowercase = key.replace('heads.imagenet.mim_head.cls.predictions' , 'mim_head' )
__lowercase = key.replace('mm_text_projection' , 'flava.text_to_mm_projection' )
__lowercase = key.replace('mm_image_projection' , 'flava.image_to_mm_projection' )
__lowercase = key.replace('image_encoder.module' , 'flava.image_model' )
__lowercase = key.replace('text_encoder.module' , 'flava.text_model' )
__lowercase = key.replace('mm_encoder.module.encoder.cls_token' , 'flava.multimodal_model.cls_token' )
__lowercase = key.replace('mm_encoder.module' , 'flava.multimodal_model' )
__lowercase = key.replace('text_projection' , 'flava.text_projection' )
__lowercase = key.replace('image_projection' , 'flava.image_projection' )
__lowercase = value.float()
for key, value in codebook_state_dict.items():
__lowercase = value
return upgrade
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : int=None ) -> List[Any]:
if config_path is not None:
__lowercase = FlavaConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
__lowercase = FlavaConfig()
__lowercase = FlavaForPreTraining(SCREAMING_SNAKE_CASE ).eval()
__lowercase = convert_dalle_checkpoint(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , save_checkpoint=SCREAMING_SNAKE_CASE )
if os.path.exists(SCREAMING_SNAKE_CASE ):
__lowercase = torch.load(SCREAMING_SNAKE_CASE , map_location='cpu' )
else:
__lowercase = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location='cpu' )
__lowercase = upgrade_state_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
hf_model.load_state_dict(SCREAMING_SNAKE_CASE )
__lowercase = hf_model.state_dict()
__lowercase = count_parameters(SCREAMING_SNAKE_CASE )
__lowercase = count_parameters(SCREAMING_SNAKE_CASE ) + count_parameters(SCREAMING_SNAKE_CASE )
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 688 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = """Hello, World!"""
SCREAMING_SNAKE_CASE__ = """en_XX"""
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : bool ) -> Optional[int]:
__lowercase = Path('data_bin' )
__lowercase = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(SCREAMING_SNAKE_CASE ).parent ) , checkpoint_file=Path(SCREAMING_SNAKE_CASE ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(SCREAMING_SNAKE_CASE ) , bpe='sentencepiece' , sentencepiece_model=str(Path(SCREAMING_SNAKE_CASE ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
xmod.eval() # disable dropout
print(SCREAMING_SNAKE_CASE )
__lowercase = xmod.model.encoder.sentence_encoder
__lowercase = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our X-MOD config:' , SCREAMING_SNAKE_CASE )
__lowercase = XmodForSequenceClassification(SCREAMING_SNAKE_CASE ) if classification_head else XmodForMaskedLM(SCREAMING_SNAKE_CASE )
model.eval()
# Now let's copy all the weights.
# Embeddings
__lowercase = xmod_sent_encoder.embed_tokens.weight
__lowercase = xmod_sent_encoder.embed_positions.weight
__lowercase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__lowercase = xmod_sent_encoder.layernorm_embedding.weight
__lowercase = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowercase = model.roberta.encoder.layer[i]
__lowercase = xmod_sent_encoder.layers[i]
# self attention
__lowercase = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
__lowercase = xmod_layer.self_attn.q_proj.weight
__lowercase = xmod_layer.self_attn.q_proj.bias
__lowercase = xmod_layer.self_attn.k_proj.weight
__lowercase = xmod_layer.self_attn.k_proj.bias
__lowercase = xmod_layer.self_attn.v_proj.weight
__lowercase = xmod_layer.self_attn.v_proj.bias
# self-attention output
__lowercase = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
__lowercase = xmod_layer.self_attn.out_proj.weight
__lowercase = xmod_layer.self_attn.out_proj.bias
__lowercase = xmod_layer.self_attn_layer_norm.weight
__lowercase = xmod_layer.self_attn_layer_norm.bias
# intermediate
__lowercase = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.' )
__lowercase = xmod_layer.fca.weight
__lowercase = xmod_layer.fca.bias
# output
__lowercase = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.' )
__lowercase = xmod_layer.fca.weight
__lowercase = xmod_layer.fca.bias
__lowercase = xmod_layer.final_layer_norm.weight
__lowercase = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__lowercase = xmod_layer.adapter_layer_norm.weight
__lowercase = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__lowercase = bert_output.adapter_modules[lang_code]
__lowercase = xmod_layer.adapter_modules[lang_code]
__lowercase = from_adapter.fca.weight
__lowercase = from_adapter.fca.bias
__lowercase = from_adapter.fca.weight
__lowercase = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__lowercase = xmod_sent_encoder.layer_norm.weight
__lowercase = xmod_sent_encoder.layer_norm.bias
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'].dense.weight
__lowercase = xmod.model.classification_heads['mnli'].dense.bias
__lowercase = xmod.model.classification_heads['mnli'].out_proj.weight
__lowercase = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
__lowercase = xmod.model.encoder.lm_head.dense.weight
__lowercase = xmod.model.encoder.lm_head.dense.bias
__lowercase = xmod.model.encoder.lm_head.layer_norm.weight
__lowercase = xmod.model.encoder.lm_head.layer_norm.bias
__lowercase = xmod.model.encoder.lm_head.weight
__lowercase = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__lowercase = xmod.encode(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(SCREAMING_SNAKE_CASE )
__lowercase = model(SCREAMING_SNAKE_CASE )[0]
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'](xmod.extract_features(SCREAMING_SNAKE_CASE ) )
else:
__lowercase = xmod.model(SCREAMING_SNAKE_CASE , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
__lowercase = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
__lowercase = torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
Path(SCREAMING_SNAKE_CASE ).mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 688 | 1 |
import math
from numpy import inf
from scipy.integrate import quad
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float ) -> float:
if num <= 0:
raise ValueError('math domain error' )
return quad(SCREAMING_SNAKE_CASE , 0 , SCREAMING_SNAKE_CASE , args=(SCREAMING_SNAKE_CASE) )[0]
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ) -> float:
return math.pow(SCREAMING_SNAKE_CASE , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
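# Numeric sanity check of the integral definition above (standalone; the
# tolerance is an assumption): Gamma(n) = (n - 1)!, so Gamma(5) = 24.
import math
from scipy.integrate import quad

val = quad(lambda x, z: math.pow(x, z - 1) * math.exp(-x), 0, math.inf, args=(5.0,))[0]
assert abs(val - 24.0) < 1e-6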
| 688 |
from __future__ import annotations
import math
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int ) -> float:
__lowercase = u
for i in range(1 , SCREAMING_SNAKE_CASE ):
__lowercase = temp * (u - i)
return temp
def __SCREAMING_SNAKE_CASE ( ) -> None:
__lowercase = int(input('enter the numbers of values: ' ) )
__lowercase = []
for _ in range(SCREAMING_SNAKE_CASE ):
y.append([] )
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE ):
y[i].append(SCREAMING_SNAKE_CASE )
__lowercase = 0
print('enter the values of parameters in a list: ' )
__lowercase = list(map(SCREAMING_SNAKE_CASE , input().split() ) )
print('enter the values of corresponding parameters: ' )
for i in range(SCREAMING_SNAKE_CASE ):
__lowercase = float(input() )
__lowercase = int(input('enter the value to interpolate: ' ) )
__lowercase = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , SCREAMING_SNAKE_CASE ):
for j in range(n - i ):
__lowercase = y[j + 1][i - 1] - y[j][i - 1]
__lowercase = y[0][0]
for i in range(1 , SCREAMING_SNAKE_CASE ):
summ += (ucal(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) * y[0][i]) / math.factorial(SCREAMING_SNAKE_CASE )
print(F"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
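# Self-contained sketch of the forward-difference interpolation above, checked
# on toy data from f(x) = x**2 (all names and values here are assumptions):
import math

def newton_forward_sketch(x: list[float], y_row: list[float], value: float) -> float:
    n = len(x)
    table = [y_row[:]]  # one row of the difference table per order
    for i in range(1, n):
        table.append([table[i - 1][j + 1] - table[i - 1][j] for j in range(n - i)])
    u = (value - x[0]) / (x[1] - x[0])
    total, u_term = table[0][0], 1.0
    for i in range(1, n):
        u_term *= u - (i - 1)
        total += u_term * table[i][0] / math.factorial(i)
    return total
# newton_forward_sketch([1, 2, 3, 4], [1, 4, 9, 16], 2.5) -> 6.25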
| 688 | 1 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class A__ :
@staticmethod
def a__ ( *_UpperCAmelCase : Dict , **_UpperCAmelCase : int ) -> str:
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Image ) -> str:
__lowercase = hashlib.mda(image.tobytes() )
return m.hexdigest()[:10]
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Image ) -> Dict:
__lowercase = np.array(SCREAMING_SNAKE_CASE )
__lowercase = npimg.shape
return {"hash": hashimage(SCREAMING_SNAKE_CASE ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class A__ ( unittest.TestCase ):
lowerCAmelCase__ : Dict = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
lowerCAmelCase__ : str = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def a__ ( self : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> str:
"""simple docstring"""
__lowercase = MaskGenerationPipeline(model=_UpperCAmelCase , image_processor=_UpperCAmelCase )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def a__ ( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] ) -> List[str]:
"""simple docstring"""
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
pass
@slow
@require_torch
def a__ ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase = pipeline('mask-generation' , model='facebook/sam-vit-huge' )
__lowercase = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=2_56 )
# Shortening by hashing
__lowercase = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(_UpperCAmelCase ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (4_80, 6_40)}, 'scores': 1.0_444},
{'mask': {'hash': '6affa964c6', 'shape': (4_80, 6_40)}, 'scores': 1.021},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_80, 6_40)}, 'scores': 1.0_167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_80, 6_40)}, 'scores': 1.0_132},
{'mask': {'hash': 'fe8065c197', 'shape': (4_80, 6_40)}, 'scores': 1.0_053},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (4_80, 6_40)}, 'scores': 0.9_967},
{'mask': {'hash': '453c7844bd', 'shape': (4_80, 6_40)}, 'scores': 0.993},
{'mask': {'hash': '3d44f2926d', 'shape': (4_80, 6_40)}, 'scores': 0.9_909},
{'mask': {'hash': '64033ddc3f', 'shape': (4_80, 6_40)}, 'scores': 0.9_879},
{'mask': {'hash': '801064ff79', 'shape': (4_80, 6_40)}, 'scores': 0.9_834},
{'mask': {'hash': '6172f276ef', 'shape': (4_80, 6_40)}, 'scores': 0.9_716},
{'mask': {'hash': 'b49e60e084', 'shape': (4_80, 6_40)}, 'scores': 0.9_612},
{'mask': {'hash': 'a811e775fd', 'shape': (4_80, 6_40)}, 'scores': 0.9_599},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (4_80, 6_40)}, 'scores': 0.9_552},
{'mask': {'hash': '9d8257e080', 'shape': (4_80, 6_40)}, 'scores': 0.9_532},
{'mask': {'hash': '32de6454a8', 'shape': (4_80, 6_40)}, 'scores': 0.9_516},
{'mask': {'hash': 'af3d4af2c8', 'shape': (4_80, 6_40)}, 'scores': 0.9_499},
{'mask': {'hash': '3c6db475fb', 'shape': (4_80, 6_40)}, 'scores': 0.9_483},
{'mask': {'hash': 'c290813fb9', 'shape': (4_80, 6_40)}, 'scores': 0.9_464},
{'mask': {'hash': 'b6f0b8f606', 'shape': (4_80, 6_40)}, 'scores': 0.943},
{'mask': {'hash': '92ce16bfdf', 'shape': (4_80, 6_40)}, 'scores': 0.943},
{'mask': {'hash': 'c749b25868', 'shape': (4_80, 6_40)}, 'scores': 0.9_408},
{'mask': {'hash': 'efb6cab859', 'shape': (4_80, 6_40)}, 'scores': 0.9_335},
{'mask': {'hash': '1ff2eafb30', 'shape': (4_80, 6_40)}, 'scores': 0.9_326},
{'mask': {'hash': '788b798e24', 'shape': (4_80, 6_40)}, 'scores': 0.9_262},
{'mask': {'hash': 'abea804f0e', 'shape': (4_80, 6_40)}, 'scores': 0.8_999},
{'mask': {'hash': '7b9e8ddb73', 'shape': (4_80, 6_40)}, 'scores': 0.8_986},
{'mask': {'hash': 'cd24047c8a', 'shape': (4_80, 6_40)}, 'scores': 0.8_984},
{'mask': {'hash': '6943e6bcbd', 'shape': (4_80, 6_40)}, 'scores': 0.8_873},
{'mask': {'hash': 'b5f47c9191', 'shape': (4_80, 6_40)}, 'scores': 0.8_871}
] , )
# fmt: on
@require_torch
@slow
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = 'facebook/sam-vit-huge'
__lowercase = pipeline('mask-generation' , model=_UpperCAmelCase )
__lowercase = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=2_56 )
# Shortening by hashing
__lowercase = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(_UpperCAmelCase ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (4_80, 6_40)}, 'scores': 1.0_444},
{'mask': {'hash': '6affa964c6', 'shape': (4_80, 6_40)}, 'scores': 1.0_210},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_80, 6_40)}, 'scores': 1.0_167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_80, 6_40)}, 'scores': 1.0_132},
{'mask': {'hash': 'fe8065c197', 'shape': (4_80, 6_40)}, 'scores': 1.0_053},
] , )
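
# Hedged usage sketch mirroring the slow tests above (requires downloading the
# checkpoint, so shown as comments):
#
#   generator = pipeline('mask-generation', model='facebook/sam-vit-huge')
#   outputs = generator(
#       'http://images.cocodataset.org/val2017/000000039769.jpg', points_per_batch=256
#   )
#   outputs['masks'][0].shape  # (480, 640) binary mask per detected region
#   outputs['scores'][0]       # matching predicted IoU score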
| 688 |
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> int:
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowercase = F"""Input value of [number={number}] must be an integer"""
raise TypeError(SCREAMING_SNAKE_CASE )
if number < 1:
__lowercase = F"""Input value of [number={number}] must be > 0"""
raise ValueError(SCREAMING_SNAKE_CASE )
__lowercase = 1
for i in range(1 , SCREAMING_SNAKE_CASE ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
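
# Worked check, derivable from the loop above: each iteration applies the
# Catalan recurrence C(i) = C(i - 1) * (4 * i - 2) // (i + 1), so for an input
# of 5 the running value goes 1 -> 1 -> 2 -> 5 -> 14 and the function returns 14.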
| 688 | 1 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple ) -> List[str]:
return {key.lstrip('-' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
__lowercase = ArgumentParser(
'HuggingFace Datasets CLI tool' , usage='datasets-cli <command> [<args>]' , allow_abbrev=SCREAMING_SNAKE_CASE )
__lowercase = parser.add_subparsers(help='datasets-cli command helpers' )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(SCREAMING_SNAKE_CASE )
EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE )
TestCommand.register_subcommand(SCREAMING_SNAKE_CASE )
RunBeamCommand.register_subcommand(SCREAMING_SNAKE_CASE )
DummyDataCommand.register_subcommand(SCREAMING_SNAKE_CASE )
# Parse args
__lowercase , __lowercase = parser.parse_known_args()
if not hasattr(SCREAMING_SNAKE_CASE , 'func' ):
parser.print_help()
exit(1 )
__lowercase = parse_unknown_args(SCREAMING_SNAKE_CASE )
# Run
__lowercase = args.func(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
service.run()
if __name__ == "__main__":
main()
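
# Hedged usage sketch: with the entry point above installed as `datasets-cli`,
# a typical invocation is
#
#   datasets-cli env
#
# Any leftover `--key value` pairs after subcommand parsing are folded into
# kwargs by parse_unknown_args and passed to the command factory before
# service.run() is called.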
| 688 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def __SCREAMING_SNAKE_CASE ( ) -> List[str]:
__lowercase = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
__lowercase = parser.add_subparsers(help='diffusers-cli command helpers' )
# Register commands
EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE )
# Let's go
__lowercase = parser.parse_args()
if not hasattr(SCREAMING_SNAKE_CASE , 'func' ):
parser.print_help()
exit(1 )
# Run
__lowercase = args.func(SCREAMING_SNAKE_CASE )
service.run()
if __name__ == "__main__":
main()
| 688 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""facebook/wav2vec2-base-960h""": """https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json""",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = "wav2vec2"
def __init__( self : Optional[Any] , _UpperCAmelCase : List[str]=32 , _UpperCAmelCase : Union[str, Any]=7_68 , _UpperCAmelCase : Optional[int]=12 , _UpperCAmelCase : Optional[int]=12 , _UpperCAmelCase : Tuple=30_72 , _UpperCAmelCase : str="gelu" , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Union[str, Any]=0.0 , _UpperCAmelCase : int=0.0 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : Dict=0.1 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : str=1e-5 , _UpperCAmelCase : List[Any]="group" , _UpperCAmelCase : str="gelu" , _UpperCAmelCase : Any=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , _UpperCAmelCase : int=(5, 2, 2, 2, 2, 2, 2) , _UpperCAmelCase : List[Any]=(10, 3, 3, 3, 3, 2, 2) , _UpperCAmelCase : List[str]=False , _UpperCAmelCase : List[Any]=1_28 , _UpperCAmelCase : Any=16 , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : str=True , _UpperCAmelCase : List[str]=0.05 , _UpperCAmelCase : List[str]=10 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : str=0.0 , _UpperCAmelCase : Optional[Any]=10 , _UpperCAmelCase : Union[str, Any]=0 , _UpperCAmelCase : Dict=3_20 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Tuple=1_00 , _UpperCAmelCase : str=2_56 , _UpperCAmelCase : Optional[Any]=2_56 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : List[Any]="sum" , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : List[str]=2_56 , _UpperCAmelCase : str=(5_12, 5_12, 5_12, 5_12, 15_00) , _UpperCAmelCase : Any=(5, 3, 3, 1, 1) , _UpperCAmelCase : int=(1, 2, 3, 1, 1) , _UpperCAmelCase : Dict=5_12 , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Dict=1 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : Dict=False , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Dict=3 , _UpperCAmelCase : Any=None , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : int , ) -> int:
"""simple docstring"""
super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase )
__lowercase = hidden_size
__lowercase = feat_extract_norm
__lowercase = feat_extract_activation
__lowercase = list(_UpperCAmelCase )
__lowercase = list(_UpperCAmelCase )
__lowercase = list(_UpperCAmelCase )
__lowercase = conv_bias
__lowercase = num_conv_pos_embeddings
__lowercase = num_conv_pos_embedding_groups
__lowercase = len(self.conv_dim )
__lowercase = num_hidden_layers
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = num_attention_heads
__lowercase = hidden_dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = feat_proj_dropout
__lowercase = final_dropout
__lowercase = layerdrop
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = vocab_size
__lowercase = do_stable_layer_norm
__lowercase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowercase = apply_spec_augment
__lowercase = mask_time_prob
__lowercase = mask_time_length
__lowercase = mask_time_min_masks
__lowercase = mask_feature_prob
__lowercase = mask_feature_length
__lowercase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__lowercase = num_codevectors_per_group
__lowercase = num_codevector_groups
__lowercase = contrastive_logits_temperature
__lowercase = feat_quantizer_dropout
__lowercase = num_negatives
__lowercase = codevector_dim
__lowercase = proj_codevector_dim
__lowercase = diversity_loss_weight
# ctc loss
__lowercase = ctc_loss_reduction
__lowercase = ctc_zero_infinity
# adapter
__lowercase = add_adapter
__lowercase = adapter_kernel_size
__lowercase = adapter_stride
__lowercase = num_adapter_layers
__lowercase = output_hidden_size or hidden_size
__lowercase = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowercase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowercase = list(_UpperCAmelCase )
__lowercase = list(_UpperCAmelCase )
__lowercase = list(_UpperCAmelCase )
__lowercase = xvector_output_dim
@property
def a__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
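
# Hedged numeric check of the property above (reusing the functools/operator
# imports at the top of this file): with the default conv_stride, one encoder
# frame covers 5 * 2**6 == 320 raw audio samples.
_default_conv_stride = (5, 2, 2, 2, 2, 2, 2)
assert functools.reduce(operator.mul, _default_conv_stride, 1) == 320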
| 688 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : List[str] = ProphetNetTokenizer
lowerCAmelCase__ : str = False
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
super().setUp()
__lowercase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def a__ ( self : str , _UpperCAmelCase : Any ) -> List[str]:
"""simple docstring"""
__lowercase = 'UNwant\u00E9d,running'
__lowercase = 'unwanted, running'
return input_text, output_text
def a__ ( self : Any ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [9, 6, 7, 12, 10, 11] )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Dict ) -> str:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : str ) -> Dict:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def a__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__lowercase = {}
for i, token in enumerate(_UpperCAmelCase ):
__lowercase = i
__lowercase = WordpieceTokenizer(vocab=_UpperCAmelCase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def a__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__lowercase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__lowercase = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
__lowercase = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='pt' )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def a__ ( self : int ) -> Dict:
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def a__ ( self : Any ) -> List[str]:
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__lowercase = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCAmelCase )
__lowercase = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
assert encoded_sentence == text + [1_02]
assert encoded_pair == text + [1_02] + text_a + [1_02]
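
# Hedged illustration (toy code, not the transformers implementation): the
# greedy longest-match-first rule that the WordpieceTokenizer assertions above
# exercise, shown on the same toy vocabulary.
def _toy_wordpiece(word: str, vocab: set, unk: str = '[UNK]') -> list:
    pieces, start = [], 0
    while start < len(word):
        end, match = len(word), None
        while start < end:
            piece = word[start:end] if start == 0 else '##' + word[start:end]
            if piece in vocab:
                match = piece
                break
            end -= 1
        if match is None:
            return [unk]  # any unmatchable span marks the whole word as unknown
        pieces.append(match)
        start = end
    return pieces

assert _toy_wordpiece('unwanted', {'un', '##want', '##ed'}) == ['un', '##want', '##ed']
assert _toy_wordpiece('unwantedX', {'un', '##want', '##ed'}) == ['[UNK]']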
| 688 | 1 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
class A__ :
def __init__( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = False
def a__ ( self : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] ) -> Any:
"""simple docstring"""
if not self.initialized:
__lowercase = RagRetriever(
_UpperCAmelCase , question_encoder_tokenizer=_UpperCAmelCase , generator_tokenizer=_UpperCAmelCase , index=_UpperCAmelCase , init_retrieval=_UpperCAmelCase , )
__lowercase = True
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
self.retriever.index.init_index()
def a__ ( self : Any , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase , __lowercase = self.retriever._main_retrieve(_UpperCAmelCase , _UpperCAmelCase )
return doc_ids, retrieved_doc_embeds
class A__ ( lowerCAmelCase__ ):
def __init__( self : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Dict=None ) -> str:
"""simple docstring"""
if index is not None and index.is_initialized() and len(_UpperCAmelCase ) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
super().__init__(
_UpperCAmelCase , question_encoder_tokenizer=_UpperCAmelCase , generator_tokenizer=_UpperCAmelCase , index=_UpperCAmelCase , init_retrieval=_UpperCAmelCase , )
__lowercase = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
for worker in self.retrieval_workers
] )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def a__ ( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] ) -> Tuple:
"""simple docstring"""
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__lowercase = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__lowercase , __lowercase = ray.get(random_worker.retrieve.remote(_UpperCAmelCase , _UpperCAmelCase ) )
else:
__lowercase , __lowercase = self._main_retrieve(_UpperCAmelCase , _UpperCAmelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_UpperCAmelCase )
@classmethod
def a__ ( cls : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : List[str]=None , **_UpperCAmelCase : Any ) -> Union[str, Any]:
"""simple docstring"""
return super(_UpperCAmelCase , cls ).get_tokenizers(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
@classmethod
def a__ ( cls : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict=None , **_UpperCAmelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = kwargs.pop('config' , _UpperCAmelCase ) or RagConfig.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
__lowercase = RagTokenizer.from_pretrained(_UpperCAmelCase , config=_UpperCAmelCase )
__lowercase = rag_tokenizer.question_encoder
__lowercase = rag_tokenizer.generator
if indexed_dataset is not None:
__lowercase = 'custom'
__lowercase = CustomHFIndex(config.retrieval_vector_size , _UpperCAmelCase )
else:
__lowercase = cls._build_index(_UpperCAmelCase )
return cls(
_UpperCAmelCase , question_encoder_tokenizer=_UpperCAmelCase , generator_tokenizer=_UpperCAmelCase , retrieval_workers=_UpperCAmelCase , index=_UpperCAmelCase , )
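
# Hedged usage sketch (cluster setup and checkpoint name are illustrative; the
# classes above appear under obfuscated names such as `A__`):
#
#   import ray
#   ray.init()
#   workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       'facebook/rag-token-nq', workers
#   )
#   retriever.init_retrieval()  # loads the index once per remote worker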
| 688 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
SCREAMING_SNAKE_CASE__ = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""jukebox""": 512,
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
lowerCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ : Optional[Any] = PRETRAINED_LYRIC_TOKENS_SIZES
lowerCAmelCase__ : Any = ["input_ids", "attention_mask"]
def __init__( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=["v3", "v2", "v2"] , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Union[str, Any]="<|endoftext|>" , **_UpperCAmelCase : Tuple , ) -> List[Any]:
"""simple docstring"""
__lowercase = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
super().__init__(
unk_token=_UpperCAmelCase , n_genres=_UpperCAmelCase , version=_UpperCAmelCase , max_n_lyric_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = version
__lowercase = max_n_lyric_tokens
__lowercase = n_genres
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
__lowercase = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
        # In v2 the vocabulary had n_vocab=80 characters; in v3 the '+' character was missed, so n_vocab=79.
if len(self.lyrics_encoder ) == 79:
__lowercase = oov.replace(R'\-\'' , R'\-+\'' )
__lowercase = regex.compile(_UpperCAmelCase )
__lowercase = {v: k for k, v in self.artists_encoder.items()}
__lowercase = {v: k for k, v in self.genres_encoder.items()}
__lowercase = {v: k for k, v in self.lyrics_encoder.items()}
@property
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
        return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = [self.artists_encoder.get(_UpperCAmelCase , 0 ) for artist in list_artists]
for genres in range(len(_UpperCAmelCase ) ):
__lowercase = [self.genres_encoder.get(_UpperCAmelCase , 0 ) for genre in list_genres[genres]]
__lowercase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
__lowercase = [[self.lyrics_encoder.get(_UpperCAmelCase , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def a__ ( self : str , _UpperCAmelCase : str ) -> Tuple:
"""simple docstring"""
return list(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase , __lowercase = self.prepare_for_tokenization(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = self._tokenize(_UpperCAmelCase )
return artist, genre, lyrics
def a__ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : bool = False ) -> Tuple[str, str, str, Dict[str, Any]]:
"""simple docstring"""
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
__lowercase = artists[idx].lower()
__lowercase = [genres[idx].lower()]
else:
__lowercase = self._normalize(artists[idx] ) + '.v2'
__lowercase = [
self._normalize(_UpperCAmelCase ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
__lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
__lowercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
__lowercase = {vocab[index]: index + 1 for index in range(len(_UpperCAmelCase ) )}
__lowercase = 0
__lowercase = len(_UpperCAmelCase ) + 1
__lowercase = self.vocab
__lowercase = {v: k for k, v in self.vocab.items()}
__lowercase = ''
else:
__lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
__lowercase = self._run_strip_accents(_UpperCAmelCase )
__lowercase = lyrics.replace('\\' , '\n' )
__lowercase = self.out_of_vocab.sub('' , _UpperCAmelCase ), [], []
return artists, genres, lyrics
def a__ ( self : Tuple , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = unicodedata.normalize('NFD' , _UpperCAmelCase )
__lowercase = []
for char in text:
__lowercase = unicodedata.category(_UpperCAmelCase )
if cat == "Mn":
continue
output.append(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : str ) -> str:
"""simple docstring"""
__lowercase = (
[chr(_UpperCAmelCase ) for i in range(ord('a' ) , ord('z' ) + 1 )]
+ [chr(_UpperCAmelCase ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
+ [chr(_UpperCAmelCase ) for i in range(ord('0' ) , ord('9' ) + 1 )]
+ ['.']
)
__lowercase = frozenset(_UpperCAmelCase )
__lowercase = re.compile(R'_+' )
__lowercase = ''.join([c if c in accepted else '_' for c in text.lower()] )
__lowercase = pattern.sub('_' , _UpperCAmelCase ).strip('_' )
return text
def a__ ( self : List[str] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
return " ".join(_UpperCAmelCase )
def a__ ( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = False ) -> int:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = TensorType(_UpperCAmelCase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
__lowercase = tf.constant
__lowercase = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
__lowercase = torch.tensor
__lowercase = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
__lowercase = jnp.array
__lowercase = _is_jax
else:
__lowercase = np.asarray
__lowercase = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
__lowercase = [inputs]
if not is_tensor(_UpperCAmelCase ):
__lowercase = as_tensor(_UpperCAmelCase )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : int="" , _UpperCAmelCase : Tuple="pt" ) -> BatchEncoding:
"""simple docstring"""
__lowercase = [0, 0, 0]
__lowercase = [artist] * len(self.version )
__lowercase = [genres] * len(self.version )
__lowercase , __lowercase , __lowercase = self.tokenize(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase , __lowercase , __lowercase = self._convert_token_to_id(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = [-INFINITY] * len(full_tokens[-1] )
__lowercase = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=_UpperCAmelCase )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def a__ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=_UpperCAmelCase ) )
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=_UpperCAmelCase ) )
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=_UpperCAmelCase ) )
return (artists_file, genres_file, lyrics_file)
def a__ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.artists_decoder.get(_UpperCAmelCase )
__lowercase = [self.genres_decoder.get(_UpperCAmelCase ) for genre in genres_index]
__lowercase = [self.lyrics_decoder.get(_UpperCAmelCase ) for character in lyric_index]
return artist, genres, lyrics
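
# Hedged usage sketch (checkpoint name assumed; the tokenizer class above is
# shown under an obfuscated name):
#
#   tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
#   enc = tokenizer('Alan Jackson', 'Country Rock', 'old town road')
#   enc['input_ids']        # one tensor per prior, as assembled in __call__
#   enc['attention_masks']  # note the plural key returned above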
| 688 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""",
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : str = "mgp-str"
def __init__( self : Tuple , _UpperCAmelCase : int=[32, 1_28] , _UpperCAmelCase : str=4 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : str=27 , _UpperCAmelCase : List[str]=38 , _UpperCAmelCase : Optional[Any]=5_02_57 , _UpperCAmelCase : Union[str, Any]=3_05_22 , _UpperCAmelCase : List[str]=7_68 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : List[Any]=12 , _UpperCAmelCase : Dict=4.0 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : str=False , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : Union[str, Any]=0.0 , _UpperCAmelCase : Optional[Any]=0.0 , _UpperCAmelCase : str=0.0 , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : Union[str, Any]=0.02 , **_UpperCAmelCase : int , ) -> List[Any]:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = max_token_length
__lowercase = num_character_labels
__lowercase = num_bpe_labels
__lowercase = num_wordpiece_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = mlp_ratio
__lowercase = distilled
__lowercase = layer_norm_eps
__lowercase = drop_rate
__lowercase = qkv_bias
__lowercase = attn_drop_rate
__lowercase = drop_path_rate
__lowercase = output_aa_attentions
__lowercase = initializer_range
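
# Hedged usage sketch (real class name assumed; it appears as `A__` above):
#
#   config = MgpstrConfig()
#   config.max_token_length      # 27 by default, per the signature above
#   config.num_character_labels  # 38 by default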
| 688 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=13 , _UpperCAmelCase : Any=7 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : List[Any]=36 , _UpperCAmelCase : Optional[Any]=6 , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : Any=6 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Dict=5_12 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Any=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = embedding_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_hidden_groups
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = AlbertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ) -> Tuple:
"""simple docstring"""
__lowercase = AlbertForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , sentence_order_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def a__ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AlbertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = AlbertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = AlbertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
        (
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
        ) = config_and_inputs
__lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : int = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : Dict = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : Optional[Any] = True
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : int=False ) -> Tuple:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase )
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = AlbertModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
@slow
def a__ ( self : int ) -> Any:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AlbertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = AlbertModel.from_pretrained('albert-base-v2' )
__lowercase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
__lowercase = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , _UpperCAmelCase )
__lowercase = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) )
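
# Note, restating the integration test above: only the 3x3 slice
# output[:, 1:4, 1:4] of the (1, 11, 768) hidden states is compared against the
# hard-coded reference values, via torch.allclose with atol=1e-4.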
| 688 | 1 |
class A__ :
def __init__( self : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any ) -> Dict:
"""simple docstring"""
__lowercase = None
__lowercase = None
__lowercase = graph
self._normalize_graph(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = len(_UpperCAmelCase )
__lowercase = None
def a__ ( self : str , _UpperCAmelCase : Any , _UpperCAmelCase : Dict ) -> Optional[int]:
"""simple docstring"""
        if isinstance(sources , int ):
            __lowercase = [sources]
        if isinstance(sinks , int ):
            __lowercase = [sinks]
if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) == 0:
return
__lowercase = sources[0]
__lowercase = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(_UpperCAmelCase ) > 1 or len(_UpperCAmelCase ) > 1:
__lowercase = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
__lowercase = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
__lowercase = max_input_flow
__lowercase = 0
__lowercase = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
__lowercase = max_input_flow
__lowercase = size - 1
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
if self.maximum_flow_algorithm is None:
            raise Exception('You need to set a maximum flow algorithm first.' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def a__ ( self : Tuple , _UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
__lowercase = algorithm(self )
class A__ :
def __init__( self : Tuple , _UpperCAmelCase : Optional[Any] ) -> str:
"""simple docstring"""
__lowercase = flow_network
__lowercase = flow_network.verticesCount
__lowercase = flow_network.sourceIndex
__lowercase = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
__lowercase = flow_network.graph
__lowercase = False
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
if not self.executed:
self._algorithm()
__lowercase = True
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
pass
class A__ ( lowerCAmelCase__ ):
def __init__( self : Union[str, Any] , _UpperCAmelCase : Tuple ) -> str:
"""simple docstring"""
super().__init__(_UpperCAmelCase )
# use this to save your result
__lowercase = -1
def a__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
if not self.executed:
            raise Exception('You should execute the algorithm before using its result!' )
return self.maximum_flow
class A__ ( lowerCAmelCase__ ):
def __init__( self : Tuple , _UpperCAmelCase : Union[str, Any] ) -> Any:
"""simple docstring"""
super().__init__(_UpperCAmelCase )
__lowercase = [[0] * self.verticies_count for i in range(self.verticies_count )]
__lowercase = [0] * self.verticies_count
__lowercase = [0] * self.verticies_count
def a__ ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
__lowercase = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
__lowercase = 0
while i < len(_UpperCAmelCase ):
__lowercase = vertices_list[i]
__lowercase = self.heights[vertex_index]
self.process_vertex(_UpperCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(_UpperCAmelCase ) )
__lowercase = 0
else:
i += 1
__lowercase = sum(self.preflow[self.source_index] )
def a__ ( self : List[Any] , _UpperCAmelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(_UpperCAmelCase , _UpperCAmelCase )
self.relabel(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int ) -> List[str]:
"""simple docstring"""
__lowercase = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def a__ ( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ) -> int:
"""simple docstring"""
__lowercase = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
__lowercase = self.heights[to_index]
if min_height is not None:
__lowercase = min_height + 1
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = [0]
SCREAMING_SNAKE_CASE__ = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
SCREAMING_SNAKE_CASE__ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
SCREAMING_SNAKE_CASE__ = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
SCREAMING_SNAKE_CASE__ = flow_network.find_maximum_flow()
print(F'''maximum flow is {maximum_flow}''')
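    # Derivable check: the only path from entrance 0 to exit 3 in the sample
    # graph is 0 -> 1 -> 2 -> 3 with capacities (7, 6, 8), so this prints 6.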
| 688 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class A__ ( lowerCAmelCase__ ):
def __init__( self : List[str] , _UpperCAmelCase : str = "▁" , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[str, AddedToken] = "<unk>" , _UpperCAmelCase : Union[str, AddedToken] = "</s>" , _UpperCAmelCase : Union[str, AddedToken] = "<pad>" , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
__lowercase = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
__lowercase = token_dict['token']
__lowercase = Tokenizer(Unigram() )
__lowercase = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}' ) , ' ' ),
normalizers.Lowercase(),
] )
__lowercase = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ),
pre_tokenizers.Digits(individual_digits=_UpperCAmelCase ),
pre_tokenizers.Punctuation(),
] )
__lowercase = decoders.Metaspace(replacement=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__lowercase = TemplateProcessing(
single=f"""$A {self.special_tokens["eos"]["token"]}""" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
__lowercase = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : Union[str, List[str]] , _UpperCAmelCase : int = 80_00 , _UpperCAmelCase : bool = True , ) -> str:
"""simple docstring"""
__lowercase = trainers.UnigramTrainer(
vocab_size=_UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=_UpperCAmelCase , )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [files]
self._tokenizer.train(_UpperCAmelCase , trainer=_UpperCAmelCase )
self.add_unk_id()
def a__ ( self : int , _UpperCAmelCase : Union[Iterator[str], Iterator[Iterator[str]]] , _UpperCAmelCase : int = 80_00 , _UpperCAmelCase : bool = True , ) -> Dict:
"""simple docstring"""
__lowercase = trainers.UnigramTrainer(
vocab_size=_UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=_UpperCAmelCase , )
self._tokenizer.train_from_iterator(_UpperCAmelCase , trainer=_UpperCAmelCase )
self.add_unk_id()
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = json.loads(self._tokenizer.to_str() )
__lowercase = self.special_tokens['unk']['id']
__lowercase = Tokenizer.from_str(json.dumps(_UpperCAmelCase ) )
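# Usage sketch (assumption: the class above is transformers' scripts'
# SentencePieceUnigramTokenizer; the corpus file name is illustrative):
#
#   tokenizer = SentencePieceUnigramTokenizer()
#   tokenizer.train_from_iterator(
#       (line.strip() for line in open("corpus.txt", encoding="utf-8")),
#       vocab_size=8000,
#   )
#   print(tokenizer.encode("a sample sentence").tokens)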
| 688 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {
"""configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ErnieForCausalLM""",
"""ErnieForMaskedLM""",
"""ErnieForMultipleChoice""",
"""ErnieForNextSentencePrediction""",
"""ErnieForPreTraining""",
"""ErnieForQuestionAnswering""",
"""ErnieForSequenceClassification""",
"""ErnieForTokenClassification""",
"""ErnieModel""",
"""ErniePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
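# Note: this follows the transformers lazy-import pattern -- at import time
# sys.modules[__name__] is replaced by a _LazyModule, so the heavy modeling
# code (and torch) is only loaded on first attribute access, e.g.
# `from transformers.models.ernie import ErnieModel`.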
| 688 |
import string
from math import logaa
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> int:
__lowercase = document.translate(
str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' )
__lowercase = document_without_punctuation.split(' ' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> tuple[int, int]:
__lowercase = corpus.lower().translate(
str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with ''
__lowercase = corpus_without_punctuation.split('\n' )
__lowercase = term.lower()
return (len([doc for doc in docs if term in doc] ), len(SCREAMING_SNAKE_CASE ))
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str]=False ) -> float:
if smoothing:
if n == 0:
raise ValueError('log10(0) is undefined.' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('df must be > 0' )
elif n == 0:
raise ValueError('log10(0) is undefined.' )
return round(logaa(n / df ) , 3 )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> float:
return round(tf * idf , 3 )
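# Worked example (assumption: the four helpers above correspond to
# term_frequency, document_frequency, inverse_document_frequency and tf_idf
# from the original source; the corpus is illustrative):
#
#   corpus = "the cat sat\nthe dog sat\nthe cat ran"
#   tf = term_frequency("cat", "the cat sat")   # 1
#   df, n = document_frequency("cat", corpus)   # (2, 3): "cat" appears in 2 of 3 docs
#   idf = inverse_document_frequency(df, n)     # round(log10(3 / 2), 3) = 0.176
#   tf_idf(tf, idf)                             # round(1 * 0.176, 3) = 0.176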
| 688 | 1 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]:
__lowercase = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE )
__lowercase = flatten_dict(SCREAMING_SNAKE_CASE )
return flax_params
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] ) -> Union[str, Any]:
__lowercase = {}
__lowercase = {
'token_embedder': 'embeddings',
'encoder_norm': 'layernorm',
'kernel': 'weight',
'.out': '.output',
'scale': 'weight',
'embedders_0.pos_embedding': 'row_embedder.weight',
'embedders_1.pos_embedding': 'column_embedder.weight',
}
__lowercase = {
'query': 'attention.query',
'key': 'attention.key',
'value': 'attention.value',
'output.dense': 'output',
'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
'pre_self_attention_layer_norm': 'self_attention.layer_norm',
'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
'mlp.': 'mlp.DenseReluDense.',
'pre_mlp_layer_norm': 'mlp.layer_norm',
'self_attention.o': 'self_attention.attention.o',
'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.logits_dense.weight': 'decoder.lm_head.weight',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
__lowercase = '.'.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
__lowercase = new_key.replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
__lowercase = new_key.replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
__lowercase = re.sub(R'layers_(\d+)' , R'layer.\1' , SCREAMING_SNAKE_CASE )
__lowercase = new_key.replace('encoder' , 'encoder.encoder' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
__lowercase = re.sub(R'layers_(\d+)' , R'layer.\1' , SCREAMING_SNAKE_CASE )
__lowercase = flax_dict[key]
__lowercase = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
__lowercase = torch.from_numpy(converted_dict[key].T )
else:
__lowercase = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : str=False , SCREAMING_SNAKE_CASE : Any=False ) -> Any:
__lowercase = get_flax_param(SCREAMING_SNAKE_CASE )
if not use_large:
        __lowercase = Pix2StructVisionConfig()
        __lowercase = Pix2StructTextConfig()
    else:
        __lowercase = Pix2StructVisionConfig(
            hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
        __lowercase = Pix2StructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
    __lowercase = Pix2StructConfig(
        vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=SCREAMING_SNAKE_CASE )
    __lowercase = Pix2StructForConditionalGeneration(SCREAMING_SNAKE_CASE )
    __lowercase = rename_and_convert_flax_params(SCREAMING_SNAKE_CASE )
    model.load_state_dict(SCREAMING_SNAKE_CASE )
    __lowercase = AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer' )
    __lowercase = Pix2StructImageProcessor()
    __lowercase = Pix2StructProcessor(image_processor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
if use_large:
__lowercase = 4096
__lowercase = True
# mkdir if needed
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
print('Model saved in {}'.format(SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
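# Usage sketch (script and checkpoint paths are illustrative):
#   python convert_pix2struct_original_pytorch_checkpoint_to_hf.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-base \
#       --use_large   # only for the large variant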
| 688 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# TODO: upload to AWS
SCREAMING_SNAKE_CASE__ = {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"""
),
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = "retribert"
def __init__( self : Optional[Any] , _UpperCAmelCase : Dict=3_05_22 , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : List[Any]=8 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Union[str, Any]=30_72 , _UpperCAmelCase : Optional[int]="gelu" , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Tuple=5_12 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : List[Any]=1e-1_2 , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[Any]=1_28 , _UpperCAmelCase : Optional[int]=0 , **_UpperCAmelCase : Union[str, Any] , ) -> Tuple:
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = share_encoders
__lowercase = projection_dim
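# Usage sketch (assumption: the class above is RetriBertConfig; the values
# checked are the __init__ defaults):
#   config = RetriBertConfig(projection_dim=256)
#   assert config.model_type == "retribert"
#   assert config.hidden_size == 768 and config.share_encoders is True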
| 688 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""caidas/swin2sr-classicalsr-x2-64""": (
"""https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
),
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[Any] = "swin2sr"
lowerCAmelCase__ : Dict = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : List[Any] , _UpperCAmelCase : Tuple=64 , _UpperCAmelCase : Any=1 , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : Optional[Any]=1_80 , _UpperCAmelCase : Tuple=[6, 6, 6, 6, 6, 6] , _UpperCAmelCase : Optional[int]=[6, 6, 6, 6, 6, 6] , _UpperCAmelCase : Any=8 , _UpperCAmelCase : List[str]=2.0 , _UpperCAmelCase : int=True , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : Tuple=0.0 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : str=1e-5 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Any=1.0 , _UpperCAmelCase : int="1conv" , _UpperCAmelCase : int="pixelshuffle" , **_UpperCAmelCase : Tuple , ) -> str:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = depths
__lowercase = len(_UpperCAmelCase )
__lowercase = num_heads
__lowercase = window_size
__lowercase = mlp_ratio
__lowercase = qkv_bias
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = drop_path_rate
__lowercase = hidden_act
__lowercase = use_absolute_embeddings
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = upscale
__lowercase = img_range
__lowercase = resi_connection
__lowercase = upsampler
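# Usage sketch (assumption: the class above is Swin2SRConfig; attribute_map
# exposes num_hidden_layers as an alias for num_layers):
#   config = Swin2SRConfig(upscale=4)
#   assert config.num_layers == len(config.depths) == 6
#   assert config.num_hidden_layers == config.num_layers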
| 688 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 688 | 1 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
class A__ :
lowerCAmelCase__ : Optional[int] = "dummy_data"
lowerCAmelCase__ : str = "datasets"
lowerCAmelCase__ : Dict = False
def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Union[Version, str] , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[List[Callable]] = None , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 0
__lowercase = dataset_name
__lowercase = cache_dir
__lowercase = use_local_dummy_data
__lowercase = config
# download_callbacks take a single url as input
__lowercase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__lowercase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__lowercase = str(_UpperCAmelCase )
# to be downloaded
__lowercase = None
__lowercase = None
@property
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
if self._dummy_file is None:
__lowercase = self.download_dummy_data()
return self._dummy_file
@property
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__lowercase = cached_path(
_UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=_UpperCAmelCase , force_extract=_UpperCAmelCase )
return os.path.join(_UpperCAmelCase , self.dummy_file_name )
@property
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
if self._bucket_url is None:
__lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : List[str] , *_UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__lowercase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__lowercase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return self.create_dummy_data_dict(_UpperCAmelCase , _UpperCAmelCase )
elif isinstance(_UpperCAmelCase , (list, tuple) ):
return self.create_dummy_data_list(_UpperCAmelCase , _UpperCAmelCase )
else:
return self.create_dummy_data_single(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Optional[int] , _UpperCAmelCase : Tuple , *_UpperCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def a__ ( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , *_UpperCAmelCase : str , **_UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
return path
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return {}
def a__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
for single_url in single_urls:
download_callback(_UpperCAmelCase )
else:
__lowercase = single_urls
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) ) for x in single_urls]
else:
__lowercase = single_urls
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) )
__lowercase = value
# make sure that values are unique
if all(isinstance(_UpperCAmelCase , _UpperCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__lowercase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__lowercase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , _UpperCAmelCase ) ) for url in data_url )
__lowercase = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__lowercase = [data_url[0]] * len(_UpperCAmelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(_UpperCAmelCase )
return dummy_data_list
def a__ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(_UpperCAmelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
pass
def a__ ( self : int ) -> str:
"""simple docstring"""
pass
def a__ ( self : Optional[int] , _UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
def _iter_archive_members(_UpperCAmelCase : Optional[Any] ):
# this preserves the order of the members inside the ZIP archive
__lowercase = Path(self.dummy_file ).parent
__lowercase = path.relative_to(_UpperCAmelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__lowercase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_UpperCAmelCase )
__lowercase = Path(_UpperCAmelCase )
__lowercase = _iter_archive_members(_UpperCAmelCase ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(_UpperCAmelCase ).as_posix(), file_path.open('rb' )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [paths]
for path in paths:
if os.path.isfile(_UpperCAmelCase ):
if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_UpperCAmelCase ):
if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(_UpperCAmelCase ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(_UpperCAmelCase , _UpperCAmelCase )
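# Usage sketch (assumption: the class above is datasets' MockDownloadManager;
# dataset name, version and URL are illustrative):
#   dl_manager = MockDownloadManager("squad", None, Version("1.0.0"))
#   paths = dl_manager.download_and_extract({"train": "https://host/train.json"})
#   # returns paths inside the local dummy_data.zip instead of real downloads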
| 688 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = ["image_processor", "tokenizer"]
lowerCAmelCase__ : Union[str, Any] = "LayoutLMv2ImageProcessor"
lowerCAmelCase__ : Union[str, Any] = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self : Optional[Any] , _UpperCAmelCase : Any=None , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
__lowercase = kwargs.pop('feature_extractor' )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , _UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , _UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : Dict , ) -> BatchEncoding:
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
# first, apply the image processor
__lowercase = self.image_processor(images=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [text] # add batch dimension (as the image processor always adds a batch dimension)
__lowercase = features['words']
__lowercase = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
# add pixel values
__lowercase = features.pop('pixel_values' )
if return_overflowing_tokens is True:
__lowercase = self.get_overflowing_images(_UpperCAmelCase , encoded_inputs['overflow_to_sample_mapping'] )
__lowercase = images
return encoded_inputs
def a__ ( self : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
__lowercase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
f""" {len(_UpperCAmelCase )} and {len(_UpperCAmelCase )}""" )
return images_with_overflow
def a__ ( self : Dict , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : Optional[Any] , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def a__ ( self : str ) -> Dict:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , )
return self.image_processor_class
@property
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , )
return self.image_processor
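# Usage sketch (assumption: the class above is LayoutXLMProcessor; the image
# input is illustrative):
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(image, return_tensors="pt")  # OCR runs in the image processor
#   # encoding holds input_ids, bbox, attention_mask and image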
| 688 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class A__ :
lowerCAmelCase__ : CommonSchedulerState
# setable values
lowerCAmelCase__ : jnp.ndarray
lowerCAmelCase__ : jnp.ndarray
lowerCAmelCase__ : Optional[int] = None
@classmethod
def a__ ( cls : List[str] , _UpperCAmelCase : CommonSchedulerState , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : jnp.ndarray ) -> Optional[int]:
"""simple docstring"""
return cls(common=_UpperCAmelCase , init_noise_sigma=_UpperCAmelCase , timesteps=_UpperCAmelCase )
@dataclass
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : DDPMSchedulerState
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[Any] = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowerCAmelCase__ : jnp.dtype
@property
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
return True
@register_to_config
    def __init__( self : Dict , _UpperCAmelCase : int = 10_00 , _UpperCAmelCase : float = 0.0_001 , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : str = "linear" , _UpperCAmelCase : Optional[jnp.ndarray] = None , _UpperCAmelCase : str = "fixed_small" , _UpperCAmelCase : bool = True , _UpperCAmelCase : str = "epsilon" , _UpperCAmelCase : jnp.dtype = jnp.float32 , ) -> Tuple:
"""simple docstring"""
__lowercase = dtype
def a__ ( self : Tuple , _UpperCAmelCase : Optional[CommonSchedulerState] = None ) -> DDPMSchedulerState:
"""simple docstring"""
if common is None:
__lowercase = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
__lowercase = jnp.array(1.0 , dtype=self.dtype )
__lowercase = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=_UpperCAmelCase , init_noise_sigma=_UpperCAmelCase , timesteps=_UpperCAmelCase , )
def a__ ( self : List[str] , _UpperCAmelCase : DDPMSchedulerState , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : Optional[int] = None ) -> jnp.ndarray:
"""simple docstring"""
return sample
def a__ ( self : Optional[Any] , _UpperCAmelCase : DDPMSchedulerState , _UpperCAmelCase : int , _UpperCAmelCase : Tuple = () ) -> DDPMSchedulerState:
"""simple docstring"""
__lowercase = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
__lowercase = (jnp.arange(0 , _UpperCAmelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=_UpperCAmelCase , timesteps=_UpperCAmelCase , )
def a__ ( self : Tuple , _UpperCAmelCase : DDPMSchedulerState , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Dict=None ) -> Any:
"""simple docstring"""
__lowercase = state.common.alphas_cumprod[t]
__lowercase = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__lowercase = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
__lowercase = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
__lowercase = jnp.clip(_UpperCAmelCase , a_min=1e-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
__lowercase = jnp.log(jnp.clip(_UpperCAmelCase , a_min=1e-2_0 ) )
elif variance_type == "fixed_large":
__lowercase = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
__lowercase = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
__lowercase = variance
__lowercase = state.common.betas[t]
__lowercase = (predicted_variance + 1) / 2
__lowercase = frac * max_log + (1 - frac) * min_log
return variance
def a__ ( self : Tuple , _UpperCAmelCase : DDPMSchedulerState , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : Optional[jax.random.KeyArray] = None , _UpperCAmelCase : bool = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
"""simple docstring"""
__lowercase = timestep
if key is None:
__lowercase = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__lowercase , __lowercase = jnp.split(_UpperCAmelCase , sample.shape[1] , axis=1 )
else:
__lowercase = None
# 1. compute alphas, betas
__lowercase = state.common.alphas_cumprod[t]
__lowercase = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
__lowercase = 1 - alpha_prod_t
__lowercase = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__lowercase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__lowercase = model_output
elif self.config.prediction_type == "v_prediction":
__lowercase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`"""
                ' for the FlaxDDPMScheduler.' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__lowercase = jnp.clip(_UpperCAmelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowercase = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__lowercase = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowercase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__lowercase = jax.random.split(_UpperCAmelCase , num=1 )
__lowercase = jax.random.normal(_UpperCAmelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(_UpperCAmelCase , _UpperCAmelCase , predicted_variance=_UpperCAmelCase ) ** 0.5) * noise
__lowercase = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
__lowercase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_UpperCAmelCase , state=_UpperCAmelCase )
def a__ ( self : List[Any] , _UpperCAmelCase : DDPMSchedulerState , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : jnp.ndarray , ) -> jnp.ndarray:
"""simple docstring"""
return add_noise_common(state.common , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Tuple , _UpperCAmelCase : DDPMSchedulerState , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : jnp.ndarray , ) -> jnp.ndarray:
"""simple docstring"""
return get_velocity_common(state.common , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def __len__( self : int ) -> Optional[int]:
"""simple docstring"""
return self.config.num_train_timesteps
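# Usage sketch (assumption: the classes above are diffusers' FlaxDDPMScheduler
# and its state; model_output and sample are illustrative jnp arrays):
#   scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, 50)
#   for t in state.timesteps:
#       out = scheduler.step(state, model_output, t, sample, key=jax.random.PRNGKey(0))
#       sample, state = out.prev_sample, out.state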
| 688 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
class A__ :
lowerCAmelCase__ : Optional[int] = "dummy_data"
lowerCAmelCase__ : str = "datasets"
lowerCAmelCase__ : Dict = False
def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Union[Version, str] , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[List[Callable]] = None , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 0
__lowercase = dataset_name
__lowercase = cache_dir
__lowercase = use_local_dummy_data
__lowercase = config
# download_callbacks take a single url as input
__lowercase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__lowercase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__lowercase = str(_UpperCAmelCase )
# to be downloaded
__lowercase = None
__lowercase = None
@property
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
if self._dummy_file is None:
__lowercase = self.download_dummy_data()
return self._dummy_file
@property
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__lowercase = cached_path(
_UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=_UpperCAmelCase , force_extract=_UpperCAmelCase )
return os.path.join(_UpperCAmelCase , self.dummy_file_name )
@property
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
if self._bucket_url is None:
__lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : List[str] , *_UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__lowercase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__lowercase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return self.create_dummy_data_dict(_UpperCAmelCase , _UpperCAmelCase )
elif isinstance(_UpperCAmelCase , (list, tuple) ):
return self.create_dummy_data_list(_UpperCAmelCase , _UpperCAmelCase )
else:
return self.create_dummy_data_single(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Optional[int] , _UpperCAmelCase : Tuple , *_UpperCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def a__ ( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , *_UpperCAmelCase : str , **_UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
return path
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return {}
def a__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
for single_url in single_urls:
download_callback(_UpperCAmelCase )
else:
__lowercase = single_urls
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) ) for x in single_urls]
else:
__lowercase = single_urls
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) )
__lowercase = value
# make sure that values are unique
if all(isinstance(_UpperCAmelCase , _UpperCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__lowercase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__lowercase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , _UpperCAmelCase ) ) for url in data_url )
__lowercase = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__lowercase = [data_url[0]] * len(_UpperCAmelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(_UpperCAmelCase )
return dummy_data_list
def a__ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(_UpperCAmelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
pass
def a__ ( self : int ) -> str:
"""simple docstring"""
pass
def a__ ( self : Optional[int] , _UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
def _iter_archive_members(_UpperCAmelCase : Optional[Any] ):
# this preserves the order of the members inside the ZIP archive
__lowercase = Path(self.dummy_file ).parent
__lowercase = path.relative_to(_UpperCAmelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__lowercase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_UpperCAmelCase )
__lowercase = Path(_UpperCAmelCase )
__lowercase = _iter_archive_members(_UpperCAmelCase ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(_UpperCAmelCase ).as_posix(), file_path.open('rb' )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [paths]
for path in paths:
if os.path.isfile(_UpperCAmelCase ):
if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_UpperCAmelCase ):
if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(_UpperCAmelCase ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(_UpperCAmelCase , _UpperCAmelCase )
| 688 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""",
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[Any] = "blip_2_vision_model"
def __init__( self : int , _UpperCAmelCase : Dict=14_08 , _UpperCAmelCase : Union[str, Any]=61_44 , _UpperCAmelCase : List[Any]=39 , _UpperCAmelCase : str=16 , _UpperCAmelCase : Any=2_24 , _UpperCAmelCase : int=14 , _UpperCAmelCase : str="gelu" , _UpperCAmelCase : str=0.00_001 , _UpperCAmelCase : List[Any]=0.0 , _UpperCAmelCase : Any=1e-1_0 , _UpperCAmelCase : Dict=True , **_UpperCAmelCase : int , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
__lowercase = hidden_size
__lowercase = intermediate_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = patch_size
__lowercase = image_size
__lowercase = initializer_range
__lowercase = attention_dropout
__lowercase = layer_norm_eps
__lowercase = hidden_act
__lowercase = qkv_bias
@classmethod
def a__ ( cls : int , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Union[str, Any] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(_UpperCAmelCase )
__lowercase , __lowercase = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
__lowercase = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Union[str, Any] = "blip_2_qformer"
def __init__( self : Dict , _UpperCAmelCase : Optional[int]=3_05_22 , _UpperCAmelCase : Optional[Any]=7_68 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : int=12 , _UpperCAmelCase : int=30_72 , _UpperCAmelCase : Optional[int]="gelu" , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : Any=5_12 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1e-1_2 , _UpperCAmelCase : Any=0 , _UpperCAmelCase : List[str]="absolute" , _UpperCAmelCase : Any=2 , _UpperCAmelCase : str=14_08 , **_UpperCAmelCase : Dict , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = cross_attention_frequency
__lowercase = encoder_hidden_size
@classmethod
def a__ ( cls : Optional[int] , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Any ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(_UpperCAmelCase )
__lowercase , __lowercase = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
__lowercase = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Any = "blip-2"
lowerCAmelCase__ : int = True
def __init__( self : int , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Optional[int]=32 , **_UpperCAmelCase : Tuple ) -> List[str]:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
if vision_config is None:
__lowercase = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
__lowercase = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
__lowercase = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__lowercase = BlipaVisionConfig(**_UpperCAmelCase )
__lowercase = BlipaQFormerConfig(**_UpperCAmelCase )
__lowercase = text_config['model_type'] if 'model_type' in text_config else 'opt'
__lowercase = CONFIG_MAPPING[text_model_type](**_UpperCAmelCase )
__lowercase = self.text_config.tie_word_embeddings
__lowercase = self.text_config.is_encoder_decoder
__lowercase = num_query_tokens
__lowercase = self.vision_config.hidden_size
__lowercase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__lowercase = 1.0
__lowercase = 0.02
@classmethod
def a__ ( cls : Union[str, Any] , _UpperCAmelCase : BlipaVisionConfig , _UpperCAmelCase : BlipaQFormerConfig , _UpperCAmelCase : PretrainedConfig , **_UpperCAmelCase : List[Any] , ) -> str:
"""simple docstring"""
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_UpperCAmelCase , )
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.vision_config.to_dict()
__lowercase = self.qformer_config.to_dict()
__lowercase = self.text_config.to_dict()
__lowercase = self.__class__.model_type
return output
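# Usage sketch (assumption: the classes above are transformers'
# Blip2VisionConfig, Blip2QFormerConfig and Blip2Config; OPTConfig is the
# default text backbone):
#   config = Blip2Config()  # sub-configs fall back to their defaults
#   config = Blip2Config.from_vision_qformer_text_configs(
#       Blip2VisionConfig(), Blip2QFormerConfig(), OPTConfig(), num_query_tokens=32
#   )
#   config.to_dict()  # serialises the nested sub-configs as well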
| 688 |
import math
import sys
import cv2
import numpy as np
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : float ) -> np.ndarray:
# For applying gaussian function for each element in matrix.
__lowercase = math.sqrt(SCREAMING_SNAKE_CASE )
__lowercase = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> np.ndarray:
__lowercase = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : float ) -> np.ndarray:
# Creates a gaussian kernel of given dimension.
__lowercase = np.zeros((kernel_size, kernel_size) )
for i in range(0 , SCREAMING_SNAKE_CASE ):
for j in range(0 , SCREAMING_SNAKE_CASE ):
__lowercase = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int , ) -> np.ndarray:
__lowercase = np.zeros(img.shape )
__lowercase = get_gauss_kernel(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase , __lowercase = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
__lowercase = get_slice(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = img_s - img_s[kernel_size // 2, kernel_size // 2]
__lowercase = vec_gaussian(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = np.multiply(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = np.multiply(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = np.sum(SCREAMING_SNAKE_CASE ) / np.sum(SCREAMING_SNAKE_CASE )
__lowercase = val
return imga
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : list ) -> tuple:
__lowercase = args[1] if args[1:] else '../image_data/lena.jpg'
__lowercase = float(args[2] ) if args[2:] else 1.0
__lowercase = float(args[3] ) if args[3:] else 1.0
if args[4:]:
__lowercase = int(args[4] )
__lowercase = kernel_size + abs(kernel_size % 2 - 1 )
else:
__lowercase = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = parse_args(sys.argv)
    SCREAMING_SNAKE_CASE__ = cv2.imread(filename, 0)
    cv2.imshow("""input image""", img)
    SCREAMING_SNAKE_CASE__ = img / 255
    SCREAMING_SNAKE_CASE__ = out.astype("""float32""")
    SCREAMING_SNAKE_CASE__ = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    SCREAMING_SNAKE_CASE__ = out * 255
    SCREAMING_SNAKE_CASE__ = np.uint8(out)
    cv2.imshow("""output image""", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
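# Usage sketch (arguments: image path, spatial variance, intensity variance,
# kernel size -- parse_args forces the kernel size to be odd; values are
# illustrative):
#   python bilateral_filter.py ../image_data/lena.jpg 1.0 1.0 5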
| 688 | 1 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 688 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : Any=18 , _UpperCAmelCase : Dict=30 , _UpperCAmelCase : Tuple=4_00 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Any=True , ) -> Dict:
"""simple docstring"""
__lowercase = size if size is not None else {'height': 18, 'width': 18}
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = image_size
__lowercase = min_resolution
__lowercase = max_resolution
__lowercase = do_resize
__lowercase = size
__lowercase = apply_ocr
def a__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
    lowerCAmelCase__ : int = LayoutLMv3ImageProcessor if is_pytesseract_available() else None
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = LayoutLMvaImageProcessingTester(self )
@property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'apply_ocr' ) )
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
pass
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images (equal_resolution flag assumed False)
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors='pt' )
        self.assertEqual(
            encoding.pixel_values.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors (equal_resolution/numpify flags assumed False/True)
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors (equal_resolution/torchify flags assumed False/True)
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
        image = Image.open(ds[0]['file'] ).convert('RGB' )
        encoding = image_processing(image , return_tensors='pt' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__lowercase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__lowercase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _UpperCAmelCase )
self.assertListEqual(encoding.boxes , _UpperCAmelCase )
        # with apply_ocr = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(image , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
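        # With apply_ocr=False the processor returns only pixel_values; no words or boxes.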
| 688 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/spiece.model""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""google/fnet-base""": 512,
"""google/fnet-large""": 512,
}
SCREAMING_SNAKE_CASE__ = """▁"""
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[str] = VOCAB_FILES_NAMES
lowerCAmelCase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ : Tuple = ["input_ids", "token_type_ids"]
lowerCAmelCase__ : Tuple = FNetTokenizer
def __init__( self : Optional[int] , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : Any=None , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Any="<unk>" , _UpperCAmelCase : Optional[int]="[SEP]" , _UpperCAmelCase : Dict="<pad>" , _UpperCAmelCase : str="[CLS]" , _UpperCAmelCase : Tuple="[MASK]" , **_UpperCAmelCase : List[str] , ) -> Dict:
"""simple docstring"""
__lowercase = (
AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase , normalized=_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase )
else mask_token
)
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = do_lower_case
__lowercase = remove_space
__lowercase = keep_accents
__lowercase = vocab_file
__lowercase = False if not self.vocab_file else True
def a__ ( self : str , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
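        # Layout: [CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair.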
def a__ ( self : Dict , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__ ( self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ):
copyfile(self.vocab_file , _UpperCAmelCase )
return (out_vocab_file,)
| 688 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = "umt5"
lowerCAmelCase__ : Tuple = ["past_key_values"]
def __init__( self : str , _UpperCAmelCase : int=25_01_12 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : List[str]=64 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : str=8 , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : str=32 , _UpperCAmelCase : Optional[int]=1_28 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : str=1e-6 , _UpperCAmelCase : Dict=1.0 , _UpperCAmelCase : str="gated-gelu" , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Tuple="T5Tokenizer" , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[str]=0 , _UpperCAmelCase : int=1 , _UpperCAmelCase : List[str]=0 , **_UpperCAmelCase : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
is_encoder_decoder=_UpperCAmelCase , tokenizer_class=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = vocab_size
__lowercase = d_model
__lowercase = d_kv
__lowercase = d_ff
__lowercase = num_layers
__lowercase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__lowercase = num_heads
__lowercase = relative_attention_num_buckets
__lowercase = relative_attention_max_distance
__lowercase = dropout_rate
__lowercase = layer_norm_epsilon
__lowercase = initializer_factor
__lowercase = feed_forward_proj
__lowercase = use_cache
__lowercase = self.feed_forward_proj.split('-' )
__lowercase = act_info[-1]
__lowercase = act_info[0] == 'gated'
if len(_UpperCAmelCase ) > 1 and act_info[0] != "gated" or len(_UpperCAmelCase ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
__lowercase = 'gelu_new'
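        # e.g. feed_forward_proj="gated-gelu" -> act_info == ["gated", "gelu"]: a gated
        # feed-forward block whose activation is remapped to "gelu_new"; a plain "relu"
        # yields a single, ungated activation.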
@property
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
return self.d_model
@property
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return self.num_heads
@property
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return self.num_layers
class A__ ( lowerCAmelCase__ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def a__ ( self : str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__lowercase = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
__lowercase = 'past_encoder_sequence + sequence'
__lowercase = {0: 'batch'}
__lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase = {0: 'batch', 1: 'decoder_sequence'}
__lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
return 13
@property
def a__ ( self : Dict ) -> float:
"""simple docstring"""
return 5e-4
| 688 | 1 |
import string
from math import log10
def term_frequency(term: str , document: str ) -> int:
    document_without_punctuation = document.translate(
        str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' )
    tokenize_document = document_without_punctuation.split(' ' ) # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )
def document_frequency(term: str , corpus: str ) -> tuple[int, int]:
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split('\n' )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
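# The next function computes idf = log10(n / df); with smoothing it returns
# 1 + log10(n / (1 + df)), which stays finite even for unseen terms (df == 0).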
def inverse_document_frequency(df: int , n: int , smoothing: bool = False ) -> float:
    if smoothing:
        if n == 0:
            raise ValueError('log10(0) is undefined.' )
        return round(1 + log10(n / (1 + df) ) , 3 )
    if df == 0:
        raise ZeroDivisionError('df must be > 0' )
    elif n == 0:
        raise ValueError('log10(0) is undefined.' )
    return round(log10(n / df ) , 3 )
def tf_idf(tf: int , idf: float ) -> float:
    return round(tf * idf , 3 )
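# Minimal usage sketch (the two-document corpus is hypothetical, added for
# illustration; the function names are the ones defined above).
if __name__ == "__main__":
    corpus = 'this is the first document\nthis document is the second document'
    tf = term_frequency('document' , 'this is the first document' )  # -> 1
    df , n = document_frequency('document' , corpus )  # -> (2, 2)
    idf = inverse_document_frequency(df , n )  # round(log10(2 / 2), 3) -> 0.0
    print(tf_idf(tf , idf ) )  # -> 0.0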
| 688 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[Any] = "layoutlmv3"
def __init__( self : Optional[Any] , _UpperCAmelCase : int=5_02_65 , _UpperCAmelCase : Union[str, Any]=7_68 , _UpperCAmelCase : str=12 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : List[str]=30_72 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : Optional[Any]=0 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : Optional[Any]=1_28 , _UpperCAmelCase : Tuple=1_28 , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : Dict=1_28 , _UpperCAmelCase : int=64 , _UpperCAmelCase : List[str]=2_56 , _UpperCAmelCase : int=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=2_24 , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
vocab_size=_UpperCAmelCase , hidden_size=_UpperCAmelCase , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , intermediate_size=_UpperCAmelCase , hidden_act=_UpperCAmelCase , hidden_dropout_prob=_UpperCAmelCase , attention_probs_dropout_prob=_UpperCAmelCase , max_position_embeddings=_UpperCAmelCase , type_vocab_size=_UpperCAmelCase , initializer_range=_UpperCAmelCase , layer_norm_eps=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = max_ad_position_embeddings
__lowercase = coordinate_size
__lowercase = shape_size
__lowercase = has_relative_attention_bias
__lowercase = rel_pos_bins
__lowercase = max_rel_pos
__lowercase = has_spatial_attention_bias
__lowercase = rel_ad_pos_bins
__lowercase = max_rel_ad_pos
__lowercase = text_embed
__lowercase = visual_embed
__lowercase = input_size
__lowercase = num_channels
__lowercase = patch_size
__lowercase = classifier_dropout
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[str] = version.parse("1.12" )
@property
def a__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def a__ ( self : Any ) -> float:
"""simple docstring"""
return 1e-5
@property
def a__ ( self : Dict ) -> int:
"""simple docstring"""
return 12
def a__ ( self : Tuple , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional["TensorType"] = None , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , 'apply_ocr' , _UpperCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase )
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__lowercase = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase = [[[48, 84, 73, 1_28]]] * batch_size
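        # Each dummy box is (x0, y0, x1, y1); LayoutLM-family models expect
        # coordinates normalized to a 0-1000 page scale.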
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = dict(
processor(
_UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) )
return inputs
| 688 | 1 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
def __init__( self : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str]=13 , _UpperCAmelCase : str=7 , _UpperCAmelCase : str=True , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : str=True , _UpperCAmelCase : str=99 , _UpperCAmelCase : int=64 , _UpperCAmelCase : List[Any]=32 , _UpperCAmelCase : str=5 , _UpperCAmelCase : Dict=4 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : int=5_12 , _UpperCAmelCase : Any=16 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Any=0.02 , _UpperCAmelCase : int=3 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Union[str, Any]=None , ) -> int:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = embedding_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def a__ ( self : str ) -> Any:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : str ) -> Optional[int]:
"""simple docstring"""
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Tuple ) -> Any:
"""simple docstring"""
__lowercase = MegatronBertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = MegatronBertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = MegatronBertForCausalLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = MegatronBertForNextSentencePrediction(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def a__ ( self : int , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
__lowercase = MegatronBertForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , next_sentence_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def a__ ( self : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Dict ) -> List[str]:
"""simple docstring"""
__lowercase = MegatronBertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = MegatronBertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str ) -> Tuple:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = MegatronBertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = MegatronBertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : List[Any] = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : Optional[int] = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : Union[str, Any] = True
# test_resize_embeddings = False
lowerCAmelCase__ : Tuple = False
def a__ ( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int=False ) -> List[Any]:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase )
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
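        # The zero tensors above stand in for `labels` and `next_sentence_label`,
        # which pretraining-style heads expect during a forward pass.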
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
        self.model_tester = MegatronBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MegatronBertConfig , hidden_size=37 )
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*_UpperCAmelCase )
def a__ ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*_UpperCAmelCase )
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*_UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*_UpperCAmelCase )
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*_UpperCAmelCase )
def a__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*_UpperCAmelCase )
def _long_tensor(tok_lst ):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
SCREAMING_SNAKE_CASE__ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
@slow
@unittest.skip('Model is not available.' )
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = 'nvidia/megatron-bert-uncased-345m'
if "MYDIR" in os.environ:
__lowercase = os.path.join(os.environ['MYDIR'] , _UpperCAmelCase )
__lowercase = MegatronBertModel.from_pretrained(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.half()
__lowercase = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
__lowercase = model(_UpperCAmelCase )[0]
__lowercase = torch.Size((1, 9, 10_24) )
self.assertEqual(output.shape , _UpperCAmelCase )
__lowercase = [-0.6_040, -0.2_517, -0.1_025, 0.3_420, -0.6_758, -0.0_017, -0.1_089, -0.1_990, 0.5_728]
for ii in range(3 ):
for jj in range(3 ):
__lowercase = output[0, ii, jj]
__lowercase = expected[3 * ii + jj]
__lowercase = 'ii={} jj={} a={} b={}'.format(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
self.assertTrue(math.isclose(_UpperCAmelCase , _UpperCAmelCase , rel_tol=_UpperCAmelCase , abs_tol=_UpperCAmelCase ) , msg=_UpperCAmelCase )
| 688 |
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray ) -> np.ndarray:
    r , g , b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2_989 * r + 0.5_870 * g + 0.1_140 * b
def gray_to_binary(gray: np.ndarray ) -> np.ndarray:
    return (gray > 127) & (gray <= 255)
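# Dilation below: an output pixel turns on when ANY pixel under the structuring element is on.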
def dilation(image: np.ndarray , kernel: np.ndarray ) -> np.ndarray:
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image (centered placement; assumes an odd-sized kernel)
    image_padded[
        kernel.shape[0] // 2 : kernel.shape[0] // 2 + image.shape[0],
        kernel.shape[1] // 2 : kernel.shape[1] // 2 + image.shape[1],
    ] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("""RGB""")
    pil_img.save("""result_dilation.png""")
| 688 | 1 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path ):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths(metric_fn , prediction , ground_truths ):
    return max(metric_fn(prediction , gt ) for gt in ground_truths )
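# SQuAD-style scoring: each prediction is credited with its best match over all gold answers.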
def get_scores(args , preds_path , gold_data_path ):
    hypos = [line.strip() for line in open(preds_path , 'r' ).readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path , sep='\t' , header=None )
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list )
            answers.append(ground_truths )
    else:
        references = [line.strip() for line in open(gold_data_path , 'r' ).readlines()]
        answers = [[reference] for reference in references]

    fa = em = total = 0
    for prediction, ground_truths in zip(hypos , answers ):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score , prediction , ground_truths )
        fa += metric_max_over_ground_truths(fa_score , prediction , ground_truths )

    em = 100.0 * em / total
    fa = 100.0 * fa / total

    logger.info(F"""F1: {fa:.2f}""" )
    logger.info(F"""EM: {em:.2f}""" )
def get_precision_at_k(args , preds_path , gold_data_path ):
    k = args.k
    hypos = [line.strip() for line in open(preds_path , 'r' ).readlines()]
    references = [line.strip() for line in open(gold_data_path , 'r' ).readlines()]

    em = total = 0
    for hypo, reference in zip(hypos , references ):
        hypo_provenance = set(hypo.split('\t' )[:k] )
        ref_provenance = set(reference.split('\t' ) )
        total += 1
        em += len(hypo_provenance & ref_provenance ) / k

    em = 100.0 * em / total
    logger.info(F"""Precision@{k}: {em: .2f}""" )
def evaluate_batch_retrieval(args , rag_model , questions ):
    def strip_title(title ):
        if title.startswith('"' ):
            title = title[1:]
        if title.endswith('"' ):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions , return_tensors='pt' , padding=True , truncation=True , )['input_ids'].to(args.device )  # padding/truncation values assumed
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids )
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    provenance_strings = []
    for docs in all_docs:
        titles = [strip_title(title ) for title in docs['title']]
        provenance_strings.append('\t'.join(titles ) )
    return provenance_strings
def evaluate_batch_eae(args , rag_model , questions ):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions , return_tensors='pt' , padding=True , truncation=True )  # padding/truncation values assumed
        input_ids = inputs_dict.input_ids.to(args.device )
        attention_mask = inputs_dict.attention_mask.to(args.device )
        # generation flags below are assumed conventional values for this script
        answers = rag_model.generate( # rag_model overwrites generate
            input_ids , attention_mask=attention_mask , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=False , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(answers , skip_special_tokens=True )

        if args.print_predictions:
            for q, a in zip(questions , answers ):
                logger.info('Q: {} - A: {}'.format(q , a ) )

        return answers
def get_args():
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=SCREAMING_SNAKE_CASE , help=(
'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
' model_name_or_path'
) , )
parser.add_argument(
'--index_name' , default=SCREAMING_SNAKE_CASE , choices=['exact', 'compressed', 'legacy'] , type=SCREAMING_SNAKE_CASE , help='RAG model retriever type' , )
parser.add_argument(
'--index_path' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help='Path to the retrieval index' , )
parser.add_argument('--n_docs' , default=5 , type=SCREAMING_SNAKE_CASE , help='Number of retrieved docs' )
parser.add_argument(
'--model_name_or_path' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , )
parser.add_argument(
'--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=SCREAMING_SNAKE_CASE , help=(
'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
' precision@k.'
) , )
parser.add_argument('--k' , default=1 , type=SCREAMING_SNAKE_CASE , help='k for the precision@k calculation' )
parser.add_argument(
'--evaluation_set' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='Path to a file containing evaluation samples' , )
parser.add_argument(
'--gold_data_path' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='Path to a tab-separated file with gold samples' , )
parser.add_argument(
'--gold_data_mode' , default='qa' , type=SCREAMING_SNAKE_CASE , choices=['qa', 'ans'] , help=(
'Format of the gold data file'
'qa - a single line in the following format: question [tab] answer_list'
'ans - a single line of the gold file contains the expected answer string'
) , )
parser.add_argument(
'--predictions_path' , type=SCREAMING_SNAKE_CASE , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , )
parser.add_argument(
'--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number' , )
parser.add_argument(
'--eval_batch_size' , default=8 , type=SCREAMING_SNAKE_CASE , help='Batch size per GPU/CPU for evaluation.' , )
parser.add_argument(
'--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , )
parser.add_argument(
'--num_beams' , default=4 , type=SCREAMING_SNAKE_CASE , help='Number of beams to be used when generating answers' , )
parser.add_argument('--min_length' , default=1 , type=SCREAMING_SNAKE_CASE , help='Min length of the generated answers' )
parser.add_argument('--max_length' , default=50 , type=SCREAMING_SNAKE_CASE , help='Max length of the generated answers' )
parser.add_argument(
'--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , )
parser.add_argument(
'--print_docs' , action='store_true' , help='If True, prints docs retried while generating.' , )
    args = parser.parse_args()
    args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
return args
def main(args ):
__lowercase = {}
if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('rag' ):
__lowercase = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
__lowercase = args.n_docs
if args.index_name is not None:
__lowercase = args.index_name
if args.index_path is not None:
__lowercase = args.index_path
else:
__lowercase = BartForConditionalGeneration
    checkpoints = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('Evaluate the following checkpoints: %s' , SCREAMING_SNAKE_CASE )
    score_fn = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
            score_fn(args , args.predictions_path , args.gold_data_path )
continue
        logger.info('***** Running evaluation for {} *****'.format(checkpoint ) )
logger.info(' Batch size = %d' , args.eval_batch_size )
logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) )
if args.model_type.startswith('rag' ):
__lowercase = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
__lowercase = model_class.from_pretrained(SCREAMING_SNAKE_CASE , retriever=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
model.retriever.init_retrieval()
else:
__lowercase = model_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
model.to(args.device )
        with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file:
            questions = []
            for line in tqdm(eval_file ):
                questions.append(line.strip() )
                if len(questions ) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args , model , questions )
                    preds_file.write('\n'.join(answers ) + '\n' )
                    preds_file.flush()
                    questions = []
            if len(questions ) > 0:
                answers = evaluate_batch_fn(args , model , questions )
                preds_file.write('\n'.join(answers ) )
                preds_file.flush()
            score_fn(args , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = get_args()
main(args)
| 688 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[str] = ["pixel_values"]
def __init__( self : Tuple , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 2_55 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : bool = True , **_UpperCAmelCase : str , ) -> None:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
__lowercase = size if size is not None else {'height': 3_84, 'width': 3_84}
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__lowercase = do_resize
__lowercase = size
__lowercase = resample
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_normalize
__lowercase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__lowercase = image_std if image_std is not None else OPENAI_CLIP_STD
__lowercase = do_convert_rgb
def a__ ( self : int , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : int , ) -> np.ndarray:
"""simple docstring"""
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
__lowercase = (size['height'], size['width'])
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : Optional[int] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Any , ) -> str:
"""simple docstring"""
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[str] , ) -> np.ndarray:
"""simple docstring"""
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : int , _UpperCAmelCase : ImageInput , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Dict[str, int]] = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[float] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : int , ) -> PIL.Image.Image:
"""simple docstring"""
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = resample if resample is not None else self.resample
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = image_mean if image_mean is not None else self.image_mean
__lowercase = image_std if image_std is not None else self.image_std
__lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase = size if size is not None else self.size
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__lowercase = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase = [convert_to_rgb(_UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
__lowercase = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_rescale:
__lowercase = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
__lowercase = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
__lowercase = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
__lowercase = BatchFeature(data={'pixel_values': images} , tensor_type=_UpperCAmelCase )
return encoded_outputs
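# A minimal numpy sketch of the transform order the preprocessing pipeline
# above applies after resizing: rescale first (e.g. 1/255 maps uint8 into
# [0, 1]), then normalize with per-dataset mean/std. The mean/std values here
# are illustrative, not any checkpoint's real statistics.
import numpy as np

def _rescale_then_normalize(image: np.ndarray, scale: float, mean: float, std: float) -> np.ndarray:
    image = image * scale          # rescale into [0, 1]
    return (image - mean) / std    # then shift/scale toward zero mean, unit variance

_demo = np.full((4, 4, 3), 255.0)
print(_rescale_then_normalize(_demo, 1 / 255, mean=0.5, std=0.5)[0, 0])  # [1. 1. 1.]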
| 688 | 1 |
def reverse_long_words(sentence: str) -> str:
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("""Hey wollef sroirraw"""))
| 688 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
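# For illustration: the module above registers its import structure once and
# hands it to _LazyModule, so heavy submodules load only on first attribute
# access. A minimal standalone sketch of the same idea, using plain PEP 562
# module-level __getattr__ (names here are illustrative, this is not
# transformers' _LazyModule implementation):
#
#     import importlib
#
#     _LAZY = {"BertConfig": ".configuration_bert"}
#
#     def __getattr__(name):
#         if name in _LAZY:
#             module = importlib.import_module(_LAZY[name], __name__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")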
| 688 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 688 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict ) -> Any:
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
__lowercase = TapasConfig.from_json_file(SCREAMING_SNAKE_CASE )
# set absolute/relative position embeddings parameter
__lowercase = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "WTQ":
# run_task_main.py hparams
__lowercase = 4
__lowercase = True
# hparam_utils.py hparams
__lowercase = 0.664_694
__lowercase = 0.207_951
__lowercase = 0.121_194
__lowercase = True
__lowercase = True
__lowercase = False
__lowercase = 0.0_352_513
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
__lowercase = 4
__lowercase = False
# hparam_utils.py hparams
__lowercase = 36.4_519
__lowercase = 0.903_421
__lowercase = 222.088
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = 0.763_141
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "TABFACT":
__lowercase = TapasForSequenceClassification(config=SCREAMING_SNAKE_CASE )
elif task == "MLM":
__lowercase = TapasForMaskedLM(config=SCREAMING_SNAKE_CASE )
elif task == "INTERMEDIATE_PRETRAINING":
__lowercase = TapasModel(config=SCREAMING_SNAKE_CASE )
else:
raise ValueError(F"""Task {task} not supported.""" )
print(F"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model (weights and configuration)
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
# Save tokenizer files
print(F"""Save tokenizer files to {pytorch_dump_path}""" )
__lowercase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt' , model_max_length=512 )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
print('Used relative position embeddings:' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
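# Example invocation (paths are placeholders; the flag names match the parser
# above, and the script filename is a stand-in for wherever this file lives):
#
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output_dir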
| 688 | 1 |
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
# read original image
SCREAMING_SNAKE_CASE__ = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
SCREAMING_SNAKE_CASE__ = np.array(Image.open(lena_path))
# kernel to be applied
SCREAMING_SNAKE_CASE__ = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
SCREAMING_SNAKE_CASE__ = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
SCREAMING_SNAKE_CASE__ = Image.fromarray(output).convert("""RGB""")
pil_img.save("""result_dilation.png""")
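# Tiny synthetic check of `dilation` that needs no image file: a single white
# pixel grows into a plus shape under the cross-shaped kernel, i.e. ones at
# (1,2), (2,1), (2,2), (2,3) and (3,2).
if __name__ == "__main__":
    demo_image = np.zeros((5, 5))
    demo_image[2, 2] = 1
    demo_kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    print(dilation(demo_image, demo_kernel))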
| 688 |
def or_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 688 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=_UpperCAmelCase ).to(_UpperCAmelCase )
__lowercase = AutoTokenizer.from_pretrained('google/mt5-small' )
__lowercase = tokenizer('Hello there' , return_tensors='pt' ).input_ids
__lowercase = tokenizer('Hi I am' , return_tensors='pt' ).input_ids
__lowercase = model(input_ids.to(_UpperCAmelCase ) , labels=labels.to(_UpperCAmelCase ) ).loss
__lowercase = -(labels.shape[-1] * loss.item())
__lowercase = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
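# Note on the score computed above: `loss` is the mean cross-entropy per
# label token, so multiplying by labels.shape[-1] and negating recovers the
# total sequence log-likelihood log p(labels | input); e.g. a mean loss of
# 2.0 over 5 label tokens gives -(5 * 2.0) = -10.0.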
| 688 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = """Hello, World!"""
SCREAMING_SNAKE_CASE__ = """en_XX"""
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : bool ) -> Optional[int]:
__lowercase = Path('data_bin' )
__lowercase = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(SCREAMING_SNAKE_CASE ).parent ) , checkpoint_file=Path(SCREAMING_SNAKE_CASE ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(SCREAMING_SNAKE_CASE ) , bpe='sentencepiece' , sentencepiece_model=str(Path(SCREAMING_SNAKE_CASE ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
xmod.eval() # disable dropout
print(SCREAMING_SNAKE_CASE )
__lowercase = xmod.model.encoder.sentence_encoder
__lowercase = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our X-MOD config:' , SCREAMING_SNAKE_CASE )
__lowercase = XmodForSequenceClassification(SCREAMING_SNAKE_CASE ) if classification_head else XmodForMaskedLM(SCREAMING_SNAKE_CASE )
model.eval()
# Now let's copy all the weights.
# Embeddings
__lowercase = xmod_sent_encoder.embed_tokens.weight
__lowercase = xmod_sent_encoder.embed_positions.weight
__lowercase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__lowercase = xmod_sent_encoder.layernorm_embedding.weight
__lowercase = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowercase = model.roberta.encoder.layer[i]
__lowercase = xmod_sent_encoder.layers[i]
# self attention
__lowercase = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
__lowercase = xmod_layer.self_attn.q_proj.weight
__lowercase = xmod_layer.self_attn.q_proj.bias
__lowercase = xmod_layer.self_attn.k_proj.weight
__lowercase = xmod_layer.self_attn.k_proj.bias
__lowercase = xmod_layer.self_attn.v_proj.weight
__lowercase = xmod_layer.self_attn.v_proj.bias
# self-attention output
__lowercase = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
__lowercase = xmod_layer.self_attn.out_proj.weight
__lowercase = xmod_layer.self_attn.out_proj.bias
__lowercase = xmod_layer.self_attn_layer_norm.weight
__lowercase = xmod_layer.self_attn_layer_norm.bias
# intermediate
__lowercase = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.' )
__lowercase = xmod_layer.fca.weight
__lowercase = xmod_layer.fca.bias
# output
__lowercase = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.' )
__lowercase = xmod_layer.fca.weight
__lowercase = xmod_layer.fca.bias
__lowercase = xmod_layer.final_layer_norm.weight
__lowercase = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__lowercase = xmod_layer.adapter_layer_norm.weight
__lowercase = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__lowercase = bert_output.adapter_modules[lang_code]
__lowercase = xmod_layer.adapter_modules[lang_code]
__lowercase = from_adapter.fca.weight
__lowercase = from_adapter.fca.bias
__lowercase = from_adapter.fca.weight
__lowercase = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__lowercase = xmod_sent_encoder.layer_norm.weight
__lowercase = xmod_sent_encoder.layer_norm.bias
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'].dense.weight
__lowercase = xmod.model.classification_heads['mnli'].dense.bias
__lowercase = xmod.model.classification_heads['mnli'].out_proj.weight
__lowercase = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
__lowercase = xmod.model.encoder.lm_head.dense.weight
__lowercase = xmod.model.encoder.lm_head.dense.bias
__lowercase = xmod.model.encoder.lm_head.layer_norm.weight
__lowercase = xmod.model.encoder.lm_head.layer_norm.bias
__lowercase = xmod.model.encoder.lm_head.weight
__lowercase = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__lowercase = xmod.encode(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(SCREAMING_SNAKE_CASE )
__lowercase = model(SCREAMING_SNAKE_CASE )[0]
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'](xmod.extract_features(SCREAMING_SNAKE_CASE ) )
else:
__lowercase = xmod.model(SCREAMING_SNAKE_CASE , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
__lowercase = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
__lowercase = torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
Path(SCREAMING_SNAKE_CASE ).mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
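# A generic sketch of the shape-checked copy used throughout the conversion
# above: each fairseq tensor is assigned to its HF counterpart only after the
# shapes are verified. `copy_linear` is illustrative, not part of this script,
# and assumes both layers have a bias term.
def copy_linear(dst, src):
    if dst.weight.shape != src.weight.shape:
        raise AssertionError("Dimensions of weights do not match.")
    dst.weight = src.weight
    dst.bias = src.bias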
| 688 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str]=False ) -> Dict:
__lowercase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
__lowercase = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any]=False ) -> List[str]:
for i in range(config.num_hidden_layers ):
if base_model:
__lowercase = ''
else:
__lowercase = 'deit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowercase = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
__lowercase = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[
: config.hidden_size, :
]
__lowercase = in_proj_bias[: config.hidden_size]
__lowercase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowercase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowercase = in_proj_weight[
-config.hidden_size :, :
]
__lowercase = in_proj_bias[-config.hidden_size :]
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple ) -> Any:
__lowercase = dct.pop(SCREAMING_SNAKE_CASE )
__lowercase = val
def __SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
__lowercase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowercase = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict:
__lowercase = DeiTConfig()
# all deit models have fine-tuned heads
__lowercase = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
__lowercase = 1000
__lowercase = 'huggingface/label-files'
__lowercase = 'imagenet-1k-id2label.json'
__lowercase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
__lowercase = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
__lowercase = int(deit_name[-6:-4] )
__lowercase = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('tiny' ):
__lowercase = 192
__lowercase = 768
__lowercase = 12
__lowercase = 3
elif deit_name[9:].startswith('small' ):
__lowercase = 384
__lowercase = 1536
__lowercase = 12
__lowercase = 6
if deit_name[9:].startswith('base' ):
pass
elif deit_name[4:].startswith('large' ):
__lowercase = 1024
__lowercase = 4096
__lowercase = 24
__lowercase = 16
# load original model from timm
__lowercase = timm.create_model(SCREAMING_SNAKE_CASE , pretrained=SCREAMING_SNAKE_CASE )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
__lowercase = timm_model.state_dict()
__lowercase = create_rename_keys(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
read_in_q_k_v(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# load HuggingFace model
__lowercase = DeiTForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by DeiTImageProcessor
__lowercase = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
__lowercase = DeiTImageProcessor(size=SCREAMING_SNAKE_CASE , crop_size=config.image_size )
__lowercase = image_processor(images=prepare_img() , return_tensors='pt' )
__lowercase = encoding['pixel_values']
__lowercase = model(SCREAMING_SNAKE_CASE )
__lowercase = timm_model(SCREAMING_SNAKE_CASE )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
print(F"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
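# For illustration: the row-wise qkv split performed by read_in_q_k_v above.
# A fused (3 * hidden, hidden) projection is sliced into query, key and value
# blocks; hidden = 4 here is arbitrary.
if __name__ == "__main__":
    hidden = 4
    fused = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    query, key, value = fused[:hidden, :], fused[hidden : 2 * hidden, :], fused[-hidden:, :]
    assert torch.equal(torch.cat([query, key, value]), fused)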
| 688 |
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    # forward difference table, initialised to zeros
    y: list[list[float]] = [[0.0] * n for _ in range(n)]

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    # assumes equally spaced x values
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"""the value at {value} is {summ}""")


if __name__ == "__main__":
    main()
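# A non-interactive sketch of the same Newton forward-difference scheme,
# reusing `ucal` as defined above and assuming equally spaced x values. For
# f(x) = x**2 sampled at x = 0..3, newton_forward([0, 1, 2, 3],
# [0.0, 1.0, 4.0, 9.0], 1.5) returns 2.25.
def newton_forward(x: list[int], fx: list[float], value: float) -> float:
    n = len(x)
    table = [[0.0] * n for _ in range(n)]
    for i, y0 in enumerate(fx):
        table[i][0] = y0
    # build the forward difference table column by column
    for i in range(1, n):
        for j in range(n - i):
            table[j][i] = table[j + 1][i - 1] - table[j][i - 1]
    u = (value - x[0]) / (x[1] - x[0])
    result = table[0][0]
    for i in range(1, n):
        result += (ucal(u, i) * table[0][i]) / math.factorial(i)
    return result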
| 688 | 1 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : int = BioGptTokenizer
lowerCAmelCase__ : List[Any] = False
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
__lowercase = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
__lowercase = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(_UpperCAmelCase ) )
def a__ ( self : Optional[int] , _UpperCAmelCase : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase = 'lower newer'
__lowercase = 'lower newer'
return input_text, output_text
def a__ ( self : int ) -> Any:
"""simple docstring"""
__lowercase = BioGptTokenizer(self.vocab_file , self.merges_file )
__lowercase = 'lower'
__lowercase = ['low', 'er</w>']
__lowercase = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = tokens + ['<unk>']
__lowercase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
@slow
def a__ ( self : Dict ) -> str:
"""simple docstring"""
__lowercase = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
__lowercase = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCAmelCase )
__lowercase = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 688 |
def catalan_number(number: int) -> int:
    if not isinstance(number, int):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if number < 1:
        msg = f"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
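    # Spot-check: catalan_number(1..6) yields the Catalan sequence 1, 1, 2, 5, 14, 42.
    print([catalan_number(i) for i in range(1, 7)])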
| 688 | 1 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
SCREAMING_SNAKE_CASE__ = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
SCREAMING_SNAKE_CASE__ = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
SCREAMING_SNAKE_CASE__ = """zero2"""
SCREAMING_SNAKE_CASE__ = """zero3"""
SCREAMING_SNAKE_CASE__ = [ZEROa, ZEROa]
def parameterized_custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"""{func.__name__}_{param_based_name}"""
# Cartesian-product of zero stages with models to test
SCREAMING_SNAKE_CASE__ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class A__ ( lowerCAmelCase__ ):
@parameterized.expand(_UpperCAmelCase , name_func=_UpperCAmelCase )
def a__ ( self : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple ) -> List[Any]:
"""simple docstring"""
self.run_and_check(
stage=_UpperCAmelCase , model=_UpperCAmelCase , distributed=_UpperCAmelCase , fpaa=_UpperCAmelCase , )
@require_torch_multi_gpu
@parameterized.expand(_UpperCAmelCase , name_func=_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any ) -> Tuple:
"""simple docstring"""
self.run_and_check(
stage=_UpperCAmelCase , model=_UpperCAmelCase , distributed=_UpperCAmelCase , fpaa=_UpperCAmelCase , )
@parameterized.expand(_UpperCAmelCase , name_func=_UpperCAmelCase )
def a__ ( self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : List[str] ) -> List[str]:
"""simple docstring"""
self.run_and_check(
stage=_UpperCAmelCase , model=_UpperCAmelCase , distributed=_UpperCAmelCase , fpaa=_UpperCAmelCase , )
@require_torch_multi_gpu
@parameterized.expand(_UpperCAmelCase , name_func=_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str ) -> int:
"""simple docstring"""
self.run_and_check(
stage=_UpperCAmelCase , model=_UpperCAmelCase , distributed=_UpperCAmelCase , fpaa=_UpperCAmelCase , )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : Tuple ) -> Optional[int]:
"""simple docstring"""
pass
def a__ ( self : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : int = 10 , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = True , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = models[model]
__lowercase = self.run_trainer(
stage=_UpperCAmelCase , model_name=_UpperCAmelCase , eval_steps=_UpperCAmelCase , num_train_epochs=1 , distributed=_UpperCAmelCase , fpaa=_UpperCAmelCase , )
self.do_checks(_UpperCAmelCase )
return output_dir
def a__ ( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : int = 10 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = True , ) -> Tuple:
"""simple docstring"""
__lowercase = self.get_auto_remove_tmp_dir('./xxx' , after=_UpperCAmelCase )
__lowercase = f"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(_UpperCAmelCase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
""".split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
__lowercase = f"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
__lowercase = [f"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
__lowercase = self.get_launcher(_UpperCAmelCase )
__lowercase = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_UpperCAmelCase , env=self.get_env() )
return output_dir
def a__ ( self : List[str] , _UpperCAmelCase : Union[str, Any]=False ) -> Optional[int]:
"""simple docstring"""
__lowercase = min(2 , get_gpu_count() ) if distributed else 1
return f"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
| 688 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def __SCREAMING_SNAKE_CASE ( ) -> List[str]:
__lowercase = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
__lowercase = parser.add_subparsers(help='diffusers-cli command helpers' )
# Register commands
EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE )
# Let's go
__lowercase = parser.parse_args()
if not hasattr(SCREAMING_SNAKE_CASE , 'func' ):
parser.print_help()
exit(1 )
# Run
__lowercase = args.func(SCREAMING_SNAKE_CASE )
service.run()
if __name__ == "__main__":
main()
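# A standalone sketch of the same subcommand pattern: each command registers
# a parser on the subparsers object and exposes its handler through
# set_defaults(func=...). The names here ("demo-cli", "env") are
# illustrative, not the real diffusers CLI surface.
def _demo_subcommand_pattern() -> None:
    parser = ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
    commands = parser.add_subparsers(help="demo-cli command helpers")
    env = commands.add_parser("env", help="print environment info")
    env.set_defaults(func=lambda args: print("environment info goes here"))
    args = parser.parse_args(["env"])
    args.func(args)  # prints: environment info goes here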
| 688 | 1 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
| 688 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : List[str] = ProphetNetTokenizer
lowerCAmelCase__ : str = False
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
super().setUp()
__lowercase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def a__ ( self : str , _UpperCAmelCase : Any ) -> List[str]:
"""simple docstring"""
__lowercase = 'UNwant\u00E9d,running'
__lowercase = 'unwanted, running'
return input_text, output_text
def a__ ( self : Any ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [9, 6, 7, 12, 10, 11] )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Dict ) -> str:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : str ) -> Dict:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def a__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__lowercase = {}
for i, token in enumerate(_UpperCAmelCase ):
__lowercase = i
__lowercase = WordpieceTokenizer(vocab=_UpperCAmelCase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def a__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__lowercase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__lowercase = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
__lowercase = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='pt' )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def a__ ( self : int ) -> Dict:
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def a__ ( self : Any ) -> List[str]:
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__lowercase = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCAmelCase )
__lowercase = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
assert encoded_sentence == text + [1_02]
assert encoded_pair == text + [1_02] + text_a + [1_02]
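# A rough standalone sketch of the greedy longest-match-first lookup that
# WordpieceTokenizer applies in the tests above (simplified: no maximum input
# length handling, and an unknown piece collapses the whole word to '[UNK]'):
def wordpiece_sketch(word, vocab, unk="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end, current = len(word), None
        while start < end:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                current = piece
                break
            end -= 1
        if current is None:
            return [unk]
        tokens.append(current)
        start = end
    return tokens


assert wordpiece_sketch("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]
assert wordpiece_sketch("unwantedX", {"un", "##want", "##ed"}) == ["[UNK]"]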
| 688 | 1 |
class EditDistance:
    """
    Minimum edit distance between two strings, solved both top-down
    (memoised recursion) and bottom-up (tabulation).
    """

    def __init__(self) -> None:
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("""****************** Testing Edit Distance DP Algorithm ******************""")
    print()

    S1 = input("""Enter the first string: """).strip()
    S2 = input("""Enter the second string: """).strip()

    print()
    print(f"""The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}""")
    print(f"""The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}""")
    print()
    print("""*************** End of Testing Edit Distance DP Algorithm ***************""")
| 688 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
SCREAMING_SNAKE_CASE__ = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""jukebox""": 512,
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
lowerCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ : Optional[Any] = PRETRAINED_LYRIC_TOKENS_SIZES
lowerCAmelCase__ : Any = ["input_ids", "attention_mask"]
def __init__( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=["v3", "v2", "v2"] , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Union[str, Any]="<|endoftext|>" , **_UpperCAmelCase : Tuple , ) -> List[Any]:
"""simple docstring"""
__lowercase = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
super().__init__(
unk_token=_UpperCAmelCase , n_genres=_UpperCAmelCase , version=_UpperCAmelCase , max_n_lyric_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = version
__lowercase = max_n_lyric_tokens
__lowercase = n_genres
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
__lowercase = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
        # In v2 the vocabulary had n_vocab=80 characters; v3 dropped "+", leaving n_vocab=79.
if len(self.lyrics_encoder ) == 79:
__lowercase = oov.replace(R'\-\'' , R'\-+\'' )
__lowercase = regex.compile(_UpperCAmelCase )
__lowercase = {v: k for k, v in self.artists_encoder.items()}
__lowercase = {v: k for k, v in self.genres_encoder.items()}
__lowercase = {v: k for k, v in self.lyrics_encoder.items()}
@property
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
        return {
            "artists_encoder": self.artists_encoder,
            "genres_encoder": self.genres_encoder,
            "lyrics_encoder": self.lyrics_encoder,
        }
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = [self.artists_encoder.get(_UpperCAmelCase , 0 ) for artist in list_artists]
for genres in range(len(_UpperCAmelCase ) ):
__lowercase = [self.genres_encoder.get(_UpperCAmelCase , 0 ) for genre in list_genres[genres]]
__lowercase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
__lowercase = [[self.lyrics_encoder.get(_UpperCAmelCase , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def a__ ( self : str , _UpperCAmelCase : str ) -> Tuple:
"""simple docstring"""
return list(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase , __lowercase = self.prepare_for_tokenization(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = self._tokenize(_UpperCAmelCase )
return artist, genre, lyrics
def a__ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : bool = False ) -> Tuple[str, str, str, Dict[str, Any]]:
"""simple docstring"""
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
__lowercase = artists[idx].lower()
__lowercase = [genres[idx].lower()]
else:
__lowercase = self._normalize(artists[idx] ) + '.v2'
__lowercase = [
self._normalize(_UpperCAmelCase ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
__lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
__lowercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
__lowercase = {vocab[index]: index + 1 for index in range(len(_UpperCAmelCase ) )}
__lowercase = 0
__lowercase = len(_UpperCAmelCase ) + 1
__lowercase = self.vocab
__lowercase = {v: k for k, v in self.vocab.items()}
__lowercase = ''
else:
__lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
__lowercase = self._run_strip_accents(_UpperCAmelCase )
__lowercase = lyrics.replace('\\' , '\n' )
__lowercase = self.out_of_vocab.sub('' , _UpperCAmelCase ), [], []
return artists, genres, lyrics
def a__ ( self : Tuple , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = unicodedata.normalize('NFD' , _UpperCAmelCase )
__lowercase = []
for char in text:
__lowercase = unicodedata.category(_UpperCAmelCase )
if cat == "Mn":
continue
output.append(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : str ) -> str:
"""simple docstring"""
__lowercase = (
[chr(_UpperCAmelCase ) for i in range(ord('a' ) , ord('z' ) + 1 )]
+ [chr(_UpperCAmelCase ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
+ [chr(_UpperCAmelCase ) for i in range(ord('0' ) , ord('9' ) + 1 )]
+ ['.']
)
__lowercase = frozenset(_UpperCAmelCase )
__lowercase = re.compile(R'_+' )
__lowercase = ''.join([c if c in accepted else '_' for c in text.lower()] )
__lowercase = pattern.sub('_' , _UpperCAmelCase ).strip('_' )
return text
def a__ ( self : List[str] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
return " ".join(_UpperCAmelCase )
def a__ ( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = False ) -> int:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = TensorType(_UpperCAmelCase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
__lowercase = tf.constant
__lowercase = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
__lowercase = torch.tensor
__lowercase = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
__lowercase = jnp.array
__lowercase = _is_jax
else:
__lowercase = np.asarray
__lowercase = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
__lowercase = [inputs]
if not is_tensor(_UpperCAmelCase ):
__lowercase = as_tensor(_UpperCAmelCase )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : int="" , _UpperCAmelCase : Tuple="pt" ) -> BatchEncoding:
"""simple docstring"""
__lowercase = [0, 0, 0]
__lowercase = [artist] * len(self.version )
__lowercase = [genres] * len(self.version )
__lowercase , __lowercase , __lowercase = self.tokenize(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase , __lowercase , __lowercase = self._convert_token_to_id(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = [-INFINITY] * len(full_tokens[-1] )
__lowercase = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=_UpperCAmelCase )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def a__ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=_UpperCAmelCase ) )
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=_UpperCAmelCase ) )
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=_UpperCAmelCase ) )
return (artists_file, genres_file, lyrics_file)
def a__ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.artists_decoder.get(_UpperCAmelCase )
__lowercase = [self.genres_decoder.get(_UpperCAmelCase ) for genre in genres_index]
__lowercase = [self.lyrics_decoder.get(_UpperCAmelCase ) for character in lyric_index]
return artist, genres, lyrics
| 688 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class A__ ( lowerCAmelCase__ ):
def __init__( self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any ) -> int:
"""simple docstring"""
__lowercase = dataset
__lowercase = process
__lowercase = params
def __len__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self : List[str] , _UpperCAmelCase : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = self.dataset[i]
__lowercase = self.process(_UpperCAmelCase , **self.params )
return processed
class A__ ( lowerCAmelCase__ ):
def __init__( self : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=None ) -> str:
"""simple docstring"""
__lowercase = loader
__lowercase = infer
__lowercase = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
__lowercase = None
__lowercase = loader_batch_size
# Internal bookkeeping
__lowercase = None
__lowercase = None
def __len__( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return len(self.loader )
def __iter__( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = iter(self.loader )
return self
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
__lowercase = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
__lowercase = {}
for k, element in self._loader_batch_data.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
# Convert ModelOutput to tuple first
__lowercase = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
__lowercase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
__lowercase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_UpperCAmelCase , _UpperCAmelCase ):
# Those are stored as lists of tensors, so they need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
__lowercase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
__lowercase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
__lowercase = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
__lowercase = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
__lowercase = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
__lowercase = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it
# look like batch_size=1
__lowercase = self._loader_batch_data.__class__(_UpperCAmelCase )
self._loader_batch_index += 1
return result
def a__ ( self : Any ) -> List[str]:
"""simple docstring"""
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
__lowercase = next(self.iterator )
__lowercase = self.infer(_UpperCAmelCase , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(_UpperCAmelCase , torch.Tensor ):
__lowercase = processed
else:
__lowercase = list(processed.keys() )[0]
__lowercase = processed[key]
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = len(_UpperCAmelCase )
else:
__lowercase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
__lowercase = observed_batch_size
# Setting internal index to unwrap the batch
__lowercase = processed
__lowercase = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class A__ ( lowerCAmelCase__ ):
def __init__( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int=None ) -> Optional[Any]:
"""simple docstring"""
super().__init__(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def __iter__( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = iter(self.loader )
__lowercase = None
return self
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
if self.subiterator is None:
__lowercase = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
__lowercase = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start looking at the next item
# ChunkIterator will keep feeding until ALL elements of the iterator
# have created their subiterator and have been fully iterated over.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
__lowercase = self.infer(next(self.iterator ) , **self.params )
__lowercase = next(self.subiterator )
return processed
class A__ ( lowerCAmelCase__ ):
def __iter__( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase = iter(self.loader )
return self
def a__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__lowercase = False
__lowercase = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
__lowercase = self.loader_batch_item()
__lowercase = item.pop('is_last' )
accumulator.append(_UpperCAmelCase )
if is_last:
return accumulator
while not is_last:
__lowercase = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(_UpperCAmelCase , torch.Tensor ):
__lowercase = processed
else:
__lowercase = list(processed.keys() )[0]
__lowercase = processed[key]
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = len(_UpperCAmelCase )
else:
__lowercase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
__lowercase = observed_batch_size
__lowercase = processed
__lowercase = 0
while self._loader_batch_index < self.loader_batch_size:
__lowercase = self.loader_batch_item()
__lowercase = item.pop('is_last' )
accumulator.append(_UpperCAmelCase )
if is_last:
return accumulator
else:
__lowercase = processed
__lowercase = item.pop('is_last' )
accumulator.append(_UpperCAmelCase )
return accumulator
class A__ ( lowerCAmelCase__ ):
def __init__( self : str , _UpperCAmelCase : Dataset , _UpperCAmelCase : str ) -> int:
"""simple docstring"""
__lowercase = dataset
__lowercase = key
def __len__( self : int ) -> Tuple:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self : Dict , _UpperCAmelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return self.dataset[i][self.key]
class A__ ( lowerCAmelCase__ ):
def __init__( self : Optional[int] , _UpperCAmelCase : Dataset , _UpperCAmelCase : str , _UpperCAmelCase : str ) -> List[Any]:
"""simple docstring"""
__lowercase = dataset
__lowercase = keya
__lowercase = keyb # the renamer collapsed the two distinct key parameters into `keya`; restore the second one
def __len__( self : Dict ) -> Dict:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self : str , _UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 688 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=13 , _UpperCAmelCase : Any=7 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : List[Any]=36 , _UpperCAmelCase : Optional[Any]=6 , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : Any=6 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Dict=5_12 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Any=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = embedding_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_hidden_groups
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = AlbertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ) -> Tuple:
"""simple docstring"""
__lowercase = AlbertForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , sentence_order_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def a__ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AlbertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = AlbertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = AlbertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = config_and_inputs
__lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : int = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : Dict = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : Optional[Any] = True
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : int=False ) -> Tuple:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase )
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = AlbertModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
@slow
def a__ ( self : int ) -> Any:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AlbertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = AlbertModel.from_pretrained('albert-base-v2' )
__lowercase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
__lowercase = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , _UpperCAmelCase )
__lowercase = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) )
| 688 | 1 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Union[str, Any] = "mask2former"
lowerCAmelCase__ : Dict = ["swin"]
lowerCAmelCase__ : List[str] = {"hidden_size": "hidden_dim"}
def __init__( self : Dict , _UpperCAmelCase : Optional[Dict] = None , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 10_24 , _UpperCAmelCase : str = "relu" , _UpperCAmelCase : int = 6 , _UpperCAmelCase : int = 10 , _UpperCAmelCase : int = 8 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : int = 20_48 , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 4 , _UpperCAmelCase : int = 2_55 , _UpperCAmelCase : int = 1_00 , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : float = 2.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : int = 1_25_44 , _UpperCAmelCase : float = 3.0 , _UpperCAmelCase : float = 0.75 , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : bool = True , _UpperCAmelCase : List[int] = [4, 8, 16, 32] , _UpperCAmelCase : bool = None , **_UpperCAmelCase : Any , ) -> Dict:
"""simple docstring"""
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
__lowercase = CONFIG_MAPPING['swin'](
image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_UpperCAmelCase , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = backbone_config.pop('model_type' )
__lowercase = CONFIG_MAPPING[backbone_model_type]
__lowercase = config_class.from_dict(_UpperCAmelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
f"""Supported model types: {",".join(self.backbones_supported )}""" )
__lowercase = backbone_config
__lowercase = feature_size
__lowercase = mask_feature_size
__lowercase = hidden_dim
__lowercase = encoder_feedforward_dim
__lowercase = activation_function
__lowercase = encoder_layers
__lowercase = decoder_layers
__lowercase = num_attention_heads
__lowercase = dropout
__lowercase = dim_feedforward
__lowercase = pre_norm
__lowercase = enforce_input_projection
__lowercase = common_stride
__lowercase = ignore_value
__lowercase = num_queries
__lowercase = no_object_weight
__lowercase = class_weight
__lowercase = mask_weight
__lowercase = dice_weight
__lowercase = train_num_points
__lowercase = oversample_ratio
__lowercase = importance_sample_ratio
__lowercase = init_std
__lowercase = init_xavier_std
__lowercase = use_auxiliary_loss
__lowercase = feature_strides
__lowercase = output_auxiliary_logits
__lowercase = decoder_layers
super().__init__(**_UpperCAmelCase )
@classmethod
def a__ ( cls : Any , _UpperCAmelCase : PretrainedConfig , **_UpperCAmelCase : Union[str, Any] ) -> Dict:
"""simple docstring"""
return cls(
backbone_config=_UpperCAmelCase , **_UpperCAmelCase , )
def a__ ( self : Optional[int] ) -> Dict[str, any]:
"""simple docstring"""
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.backbone_config.to_dict()
__lowercase = self.__class__.model_type
return output
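# Hedged usage sketch -- the class name `A__` above is an obfuscation
# placeholder; given model_type = "mask2former", upstream this is
# transformers' Mask2FormerConfig:
#   config = Mask2FormerConfig() # backbone_config=None falls back to the default Swin backbone
#   config_dict = config.to_dict() # backbone_config is serialized recursively, model_type included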
| 688 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class A__ ( lowerCAmelCase__ ):
def __init__( self : List[str] , _UpperCAmelCase : str = "▁" , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[str, AddedToken] = "<unk>" , _UpperCAmelCase : Union[str, AddedToken] = "</s>" , _UpperCAmelCase : Union[str, AddedToken] = "<pad>" , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
__lowercase = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
__lowercase = token_dict['token']
__lowercase = Tokenizer(Unigram() )
__lowercase = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}' ) , ' ' ),
normalizers.Lowercase(),
] )
__lowercase = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ),
pre_tokenizers.Digits(individual_digits=_UpperCAmelCase ),
pre_tokenizers.Punctuation(),
] )
__lowercase = decoders.Metaspace(replacement=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__lowercase = TemplateProcessing(
single=f"""$A {self.special_tokens["eos"]["token"]}""" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
__lowercase = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : Union[str, List[str]] , _UpperCAmelCase : int = 80_00 , _UpperCAmelCase : bool = True , ) -> str:
"""simple docstring"""
__lowercase = trainers.UnigramTrainer(
vocab_size=_UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=_UpperCAmelCase , )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [files]
self._tokenizer.train(_UpperCAmelCase , trainer=_UpperCAmelCase )
self.add_unk_id()
def a__ ( self : int , _UpperCAmelCase : Union[Iterator[str], Iterator[Iterator[str]]] , _UpperCAmelCase : int = 80_00 , _UpperCAmelCase : bool = True , ) -> Dict:
"""simple docstring"""
__lowercase = trainers.UnigramTrainer(
vocab_size=_UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=_UpperCAmelCase , )
self._tokenizer.train_from_iterator(_UpperCAmelCase , trainer=_UpperCAmelCase )
self.add_unk_id()
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = json.loads(self._tokenizer.to_str() )
__lowercase = self.special_tokens['unk']['id']
__lowercase = Tokenizer.from_str(json.dumps(_UpperCAmelCase ) )
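# Hedged usage sketch (file paths and vocab size are illustrative, not from
# the source; the class name is obfuscated above, and save() comes from the
# BaseTokenizer parent):
#   tokenizer = SentencePieceUnigramTokenizer()
#   tokenizer.train(files=["corpus.txt"], vocab_size=8000)
#   tokenizer.save("unigram-tokenizer.json")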
| 688 | 1 |
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int = 1000000 ) -> int:
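# Sieve for Euler's totient: initialise phi(i) = i - 1, and for each prime p
# (detected because phi[p] is still p - 1) subtract the 1/p fraction from
# every multiple. The final sum counts the reduced proper fractions with
# denominator <= limit -- the classic Project Euler problem 72 task.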
__lowercase = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , SCREAMING_SNAKE_CASE ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
| 688 |
import string
from math import logaa
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> int:
__lowercase = document.translate(
str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' )
__lowercase = document_without_punctuation.split(' ' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> tuple[int, int]:
__lowercase = corpus.lower().translate(
str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with ''
__lowercase = corpus_without_punctuation.split('\n' )
__lowercase = term.lower()
return (len([doc for doc in docs if term in doc] ), len(SCREAMING_SNAKE_CASE ))
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str]=False ) -> float:
if smoothing:
if n == 0:
raise ValueError('log10(0) is undefined.' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('df must be > 0' )
elif n == 0:
raise ValueError('log10(0) is undefined.' )
return round(logaa(n / df ) , 3 )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> float:
return round(tf * idf , 3 )
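# Worked example (note: the renamer collapsed the parameter names in the
# signatures above, so `tf`/`idf` no longer bind; this assumes the intended
# tf_idf(tf, idf) = round(tf * idf, 3)):
#   idf for a term in 2 of 10 documents: round(log10(10 / 2), 3) == 0.699
#   tf-idf with 3 occurrences of the term: round(3 * 0.699, 3) == 2.097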
| 688 | 1 |
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ ( lowerCAmelCase__ ):
def __init__( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int]=13 , _UpperCAmelCase : Optional[Any]=7 , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : str=99 , _UpperCAmelCase : int=32 , _UpperCAmelCase : List[str]=5 , _UpperCAmelCase : List[str]=4 , _UpperCAmelCase : Tuple=37 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : int=2 , _UpperCAmelCase : Optional[Any]=0.02 , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : int=True , _UpperCAmelCase : Any="None" , _UpperCAmelCase : Dict=3 , _UpperCAmelCase : Optional[int]=4 , _UpperCAmelCase : Tuple=None , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = relative_attention
__lowercase = position_biased_input
__lowercase = pos_att_type
__lowercase = scope
def a__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : str ) -> List[Any]:
"""simple docstring"""
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def a__ ( self : Tuple , _UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def a__ ( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any ) -> int:
"""simple docstring"""
__lowercase = DebertaVaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )[0]
__lowercase = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )[0]
__lowercase = model(_UpperCAmelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def a__ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = DebertaVaForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = DebertaVaForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(_UpperCAmelCase )
def a__ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = DebertaVaForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = DebertaVaForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = DebertaVaForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = config_and_inputs
__lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : str = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : int = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : List[str] = True
lowerCAmelCase__ : Tuple = False
lowerCAmelCase__ : Union[str, Any] = False
lowerCAmelCase__ : List[Any] = False
lowerCAmelCase__ : Optional[int] = False
def a__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase = DebertaVaModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def a__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : Dict ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_UpperCAmelCase )
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*_UpperCAmelCase )
@slow
def a__ ( self : Any ) -> Any:
"""simple docstring"""
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = DebertaVaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
pass
@slow
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
__lowercase = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
__lowercase = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
# compare the actual values for a slice.
__lowercase = torch.tensor(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) , f"""{output[:, 1:4, 1:4]}""" )
| 688 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# TODO: upload to AWS
SCREAMING_SNAKE_CASE__ = {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"""
),
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = "retribert"
def __init__( self : Optional[Any] , _UpperCAmelCase : Dict=3_05_22 , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : List[Any]=8 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Union[str, Any]=30_72 , _UpperCAmelCase : Optional[int]="gelu" , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Tuple=5_12 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : List[Any]=1e-1_2 , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[Any]=1_28 , _UpperCAmelCase : Optional[int]=0 , **_UpperCAmelCase : Union[str, Any] , ) -> Tuple:
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = share_encoders
__lowercase = projection_dim
| 688 | 1 |
from __future__ import annotations
import requests
SCREAMING_SNAKE_CASE__ = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : str = "new" , SCREAMING_SNAKE_CASE : list | None = None ) -> dict:
__lowercase = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(SCREAMING_SNAKE_CASE ) - valid_terms ) ):
__lowercase = F"""Invalid search term: {invalid_search_terms}"""
raise ValueError(SCREAMING_SNAKE_CASE )
__lowercase = requests.get(
F"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={'User-agent': 'A random string'} , )
if response.status_code == 429:
raise requests.HTTPError
__lowercase = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(SCREAMING_SNAKE_CASE )}
__lowercase = {}
for id_ in range(SCREAMING_SNAKE_CASE ):
__lowercase = {
item: data['data']['children'][id_]['data'][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
| 688 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 688 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""BlenderbotTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 688 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = ["image_processor", "tokenizer"]
lowerCAmelCase__ : Union[str, Any] = "LayoutLMv2ImageProcessor"
lowerCAmelCase__ : Union[str, Any] = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self : Optional[Any] , _UpperCAmelCase : Any=None , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
__lowercase = kwargs.pop('feature_extractor' )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , _UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , _UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : Dict , ) -> BatchEncoding:
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
# first, apply the image processor
__lowercase = self.image_processor(images=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [text] # add batch dimension (as the image processor always adds a batch dimension)
__lowercase = features['words']
__lowercase = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
# add pixel values
__lowercase = features.pop('pixel_values' )
if return_overflowing_tokens is True:
__lowercase = self.get_overflowing_images(_UpperCAmelCase , encoded_inputs['overflow_to_sample_mapping'] )
__lowercase = images
return encoded_inputs
def a__ ( self : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
__lowercase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
f""" {len(_UpperCAmelCase )} and {len(_UpperCAmelCase )}""" )
return images_with_overflow
def a__ ( self : Dict , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : Optional[Any] , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def a__ ( self : str ) -> Dict:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , )
return self.image_processor_class
@property
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , )
return self.image_processor
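# Illustrative sketch, not part of the original file: assumes the class above
# mirrors transformers' LayoutXLMProcessor; the checkpoint name and the OCR
# dependency (pytesseract) are assumptions.
#   from transformers import LayoutXLMProcessor
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(images=document_image, return_tensors="pt")
#   # -> BatchEncoding with input_ids, bbox, attention_mask and image tensors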
| 688 | 1 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : Union[str, Any] = BlenderbotSmallTokenizer
lowerCAmelCase__ : Any = False
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
__lowercase = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
__lowercase = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
__lowercase = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
__lowercase = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_UpperCAmelCase ) )
def a__ ( self : Tuple , **_UpperCAmelCase : Union[str, Any] ) -> Tuple:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def a__ ( self : int , _UpperCAmelCase : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = 'adapt act apte'
__lowercase = 'adapt act apte'
return input_text, output_text
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowercase = 'adapt act apte'
__lowercase = ['adapt', 'act', 'ap@@', 'te']
__lowercase = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
__lowercase = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [13_84]
__lowercase = 'I am a small frog.'
__lowercase = tok([src_text] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )['input_ids']
__lowercase = tok.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
__lowercase = 'I am a small frog .'
__lowercase = '.'
__lowercase = tok(_UpperCAmelCase )['input_ids']
__lowercase = tok(_UpperCAmelCase )['input_ids']
assert encoded[-1] == encoded_dot[0]
| 688 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
class A__ :
lowerCAmelCase__ : Optional[int] = "dummy_data"
lowerCAmelCase__ : str = "datasets"
lowerCAmelCase__ : Dict = False
def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Union[Version, str] , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[List[Callable]] = None , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 0
__lowercase = dataset_name
__lowercase = cache_dir
__lowercase = use_local_dummy_data
__lowercase = config
# download_callbacks take a single url as input
__lowercase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__lowercase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__lowercase = str(_UpperCAmelCase )
# to be downloaded
__lowercase = None
__lowercase = None
@property
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
if self._dummy_file is None:
__lowercase = self.download_dummy_data()
return self._dummy_file
@property
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__lowercase = cached_path(
_UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=_UpperCAmelCase , force_extract=_UpperCAmelCase )
return os.path.join(_UpperCAmelCase , self.dummy_file_name )
@property
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
if self._bucket_url is None:
__lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : List[str] , *_UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__lowercase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__lowercase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return self.create_dummy_data_dict(_UpperCAmelCase , _UpperCAmelCase )
elif isinstance(_UpperCAmelCase , (list, tuple) ):
return self.create_dummy_data_list(_UpperCAmelCase , _UpperCAmelCase )
else:
return self.create_dummy_data_single(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Optional[int] , _UpperCAmelCase : Tuple , *_UpperCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def a__ ( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , *_UpperCAmelCase : str , **_UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
return path
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return {}
def a__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
for single_url in single_urls:
download_callback(_UpperCAmelCase )
else:
__lowercase = single_urls
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) ) for x in single_urls]
else:
__lowercase = single_urls
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) )
__lowercase = value
# make sure that values are unique
if all(isinstance(_UpperCAmelCase , _UpperCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__lowercase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__lowercase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , _UpperCAmelCase ) ) for url in data_url )
__lowercase = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__lowercase = [data_url[0]] * len(_UpperCAmelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(_UpperCAmelCase )
return dummy_data_list
def a__ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(_UpperCAmelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
pass
def a__ ( self : int ) -> str:
"""simple docstring"""
pass
def a__ ( self : Optional[int] , _UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
def _iter_archive_members(_UpperCAmelCase : Optional[Any] ):
# this preserves the order of the members inside the ZIP archive
__lowercase = Path(self.dummy_file ).parent
__lowercase = path.relative_to(_UpperCAmelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__lowercase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_UpperCAmelCase )
__lowercase = Path(_UpperCAmelCase )
__lowercase = _iter_archive_members(_UpperCAmelCase ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(_UpperCAmelCase ).as_posix(), file_path.open('rb' )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [paths]
for path in paths:
if os.path.isfile(_UpperCAmelCase ):
if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_UpperCAmelCase ):
if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(_UpperCAmelCase ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(_UpperCAmelCase , _UpperCAmelCase )
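# Illustrative sketch, not part of the original file: assumes this mirrors
# datasets' MockDownloadManager. download_and_extract keeps the real
# DownloadManager API but resolves every URL to a path inside the local
# dummy_data.zip; the argument wiring below is hypothetical.
#   dummy_dl = MockDownloadManager(dataset_name="squad", config=None, version="1.0.0")
#   paths = dummy_dl.download_and_extract({"train": train_url, "dev": dev_url})
#   # -> {"train": ".../dummy_data/<name>", "dev": ".../dummy_data/<name>"}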
| 688 | 1 |
def or_gate(input_a: int, input_b: int) -> int:
    return int((input_a, input_b).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
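# A minimal sketch, not part of the original file: the tuple-count idiom
# above generalizes to the other basic gates, e.g. AND:
def and_gate(input_a: int, input_b: int) -> int:
    return int((input_a, input_b).count(0) == 0)


assert and_gate(1, 1) == 1 and and_gate(0, 1) == 0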
| 688 |
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply the Gaussian function to every element of the matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Create a Gaussian kernel of the given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
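# Note on what the inner loop computes, written out: for each centre pixel c
# and neighbour p,
#   w(p) = G_spatial(||p - c||) * G_intensity(I(p) - I(c))
#   out(c) = sum_p w(p) * I(p) / sum_p w(p)
# i.e. a Gaussian blur whose weights fall off across strong intensity edges,
# which is why the filter smooths noise while preserving boundaries.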
| 688 | 1 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class A__ ( unittest.TestCase ):
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = inspect.getfile(accelerate.test_utils )
__lowercase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
__lowercase = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase = f"""
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
""".split()
__lowercase = [sys.executable] + distributed_args
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
| 688 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : Any=18 , _UpperCAmelCase : Dict=30 , _UpperCAmelCase : Tuple=4_00 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Any=True , ) -> Dict:
"""simple docstring"""
__lowercase = size if size is not None else {'height': 18, 'width': 18}
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = image_size
__lowercase = min_resolution
__lowercase = max_resolution
__lowercase = do_resize
__lowercase = size
__lowercase = apply_ocr
def a__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : int = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = LayoutLMvaImageProcessingTester(self )
@property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'apply_ocr' ) )
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
pass
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , _UpperCAmelCase )
self.assertIsInstance(encoding.boxes , _UpperCAmelCase )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = LayoutLMvaImageProcessor()
from datasets import load_dataset
__lowercase = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__lowercase = Image.open(ds[0]['file'] ).convert('RGB' )
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__lowercase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__lowercase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _UpperCAmelCase )
self.assertListEqual(encoding.boxes , _UpperCAmelCase )
# with apply_OCR = False
__lowercase = LayoutLMvaImageProcessor(apply_ocr=_UpperCAmelCase )
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 688 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """spiece.model"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""TsinghuaAI/CPM-Generate""": """https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model""",
}
}
class A__ ( lowerCAmelCase__ ):
def __init__( self : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Optional[int]="<s>" , _UpperCAmelCase : Union[str, Any]="</s>" , _UpperCAmelCase : Any="<unk>" , _UpperCAmelCase : Optional[Any]="<sep>" , _UpperCAmelCase : Optional[int]="<pad>" , _UpperCAmelCase : int="<cls>" , _UpperCAmelCase : str="<mask>" , _UpperCAmelCase : Any=["<eop>", "<eod>"] , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : List[str] , ) -> None:
"""simple docstring"""
__lowercase = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
__lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
__lowercase = 3
__lowercase = do_lower_case
__lowercase = remove_space
__lowercase = keep_accents
__lowercase = vocab_file
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCAmelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '
'See https://pypi.org/project/jieba/ for installation.' )
__lowercase = jieba
__lowercase = str.maketrans(' \n' , '\u2582\u2583' )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def a__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return len(self.sp_model )
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = self.__dict__.copy()
__lowercase = None
return state
def __setstate__( self : int , _UpperCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__lowercase = {}
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a__ ( self : Any , _UpperCAmelCase : List[Any] ) -> Dict:
"""simple docstring"""
if self.remove_space:
__lowercase = ' '.join(inputs.strip().split() )
else:
__lowercase = inputs
__lowercase = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
if not self.keep_accents:
__lowercase = unicodedata.normalize('NFKD' , _UpperCAmelCase )
__lowercase = ''.join([c for c in outputs if not unicodedata.combining(_UpperCAmelCase )] )
if self.do_lower_case:
__lowercase = outputs.lower()
return outputs
def a__ ( self : List[str] , _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.preprocess_text(_UpperCAmelCase )
__lowercase = self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
__lowercase = []
for piece in pieces:
if len(_UpperCAmelCase ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
__lowercase = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCAmelCase , '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__lowercase = cur_pieces[1:]
else:
__lowercase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_UpperCAmelCase )
else:
new_pieces.append(_UpperCAmelCase )
return new_pieces
def a__ ( self : Tuple , _UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
return self.sp_model.PieceToId(_UpperCAmelCase )
def a__ ( self : Tuple , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
return self.sp_model.IdToPiece(_UpperCAmelCase )
def a__ ( self : Any , _UpperCAmelCase : Any ) -> Any:
"""simple docstring"""
__lowercase = ''.join(_UpperCAmelCase ).replace(_UpperCAmelCase , ' ' ).strip()
return out_string
def a__ ( self : Any , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def a__ ( self : Union[str, Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1, 1]
return ([0] * len(_UpperCAmelCase )) + [1, 1]
def a__ ( self : Union[str, Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase = [self.sep_token_id]
__lowercase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def a__ ( self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , 'wb' ) as fi:
__lowercase = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
def a__ ( self : Optional[int] , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = super()._decode(*_UpperCAmelCase , **_UpperCAmelCase )
__lowercase = text.replace(' ' , '' ).replace('\u2582' , ' ' ).replace('\u2583' , '\n' )
return text
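# Illustrative sketch, not part of the original file: assumes the class above
# mirrors transformers' CpmTokenizer. jieba pre-segments the text, the
# translator maps " "/"\n" to "\u2582"/"\u2583" before SentencePiece, and
# _decode above reverses that mapping.
#   tok = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   ids = tok.encode("今天天气真好")
#   text = tok.decode(ids)  # "\u2582"/"\u2583" restored to " "/"\n"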
| 688 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = "umt5"
lowerCAmelCase__ : Tuple = ["past_key_values"]
def __init__( self : str , _UpperCAmelCase : int=25_01_12 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : List[str]=64 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : str=8 , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : str=32 , _UpperCAmelCase : Optional[int]=1_28 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : str=1e-6 , _UpperCAmelCase : Dict=1.0 , _UpperCAmelCase : str="gated-gelu" , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Tuple="T5Tokenizer" , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[str]=0 , _UpperCAmelCase : int=1 , _UpperCAmelCase : List[str]=0 , **_UpperCAmelCase : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
is_encoder_decoder=_UpperCAmelCase , tokenizer_class=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = vocab_size
__lowercase = d_model
__lowercase = d_kv
__lowercase = d_ff
__lowercase = num_layers
__lowercase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__lowercase = num_heads
__lowercase = relative_attention_num_buckets
__lowercase = relative_attention_max_distance
__lowercase = dropout_rate
__lowercase = layer_norm_epsilon
__lowercase = initializer_factor
__lowercase = feed_forward_proj
__lowercase = use_cache
__lowercase = self.feed_forward_proj.split('-' )
__lowercase = act_info[-1]
__lowercase = act_info[0] == 'gated'
if len(_UpperCAmelCase ) > 1 and act_info[0] != "gated" or len(_UpperCAmelCase ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
__lowercase = 'gelu_new'
@property
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
return self.d_model
@property
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return self.num_heads
@property
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return self.num_layers
class A__ ( lowerCAmelCase__ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def a__ ( self : str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__lowercase = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
__lowercase = 'past_encoder_sequence + sequence'
__lowercase = {0: 'batch'}
__lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase = {0: 'batch', 1: 'decoder_sequence'}
__lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
return 13
@property
def a__ ( self : Dict ) -> float:
"""simple docstring"""
return 5e-4
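# Illustrative sketch of how the activation parsing above plays out
# (attribute names assumed to match the T5-style config this mirrors):
#   UMT5Config(feed_forward_proj="gated-gelu").dense_act_fn   # -> "gelu_new"
#   UMT5Config(feed_forward_proj="gated-gelu").is_gated_act   # -> True
#   UMT5Config(feed_forward_proj="gated-gelu-tanh")           # -> ValueError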
| 688 | 1 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str ) -> Dict:
__lowercase = git.Repo(search_parent_directories=SCREAMING_SNAKE_CASE )
__lowercase = {
'repo_id': str(SCREAMING_SNAKE_CASE ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
}
with open(os.path.join(SCREAMING_SNAKE_CASE , 'git_log.json' ) , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , indent=4 )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple ) -> str:
if params.n_gpu <= 0:
__lowercase = 0
__lowercase = -1
__lowercase = True
__lowercase = False
return
assert torch.cuda.is_available()
logger.info('Initializing GPUs' )
if params.n_gpu > 1:
assert params.local_rank != -1
__lowercase = int(os.environ['WORLD_SIZE'] )
__lowercase = int(os.environ['N_GPU_NODE'] )
__lowercase = int(os.environ['RANK'] )
# number of nodes / node ID
__lowercase = params.world_size // params.n_gpu_per_node
__lowercase = params.global_rank // params.n_gpu_per_node
__lowercase = True
assert params.n_nodes == int(os.environ['N_NODES'] )
assert params.node_id == int(os.environ['NODE_RANK'] )
# local job (single GPU)
else:
assert params.local_rank == -1
__lowercase = 1
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 1
__lowercase = 1
__lowercase = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
__lowercase = params.node_id == 0 and params.local_rank == 0
__lowercase = params.n_nodes > 1
# summary
__lowercase = F"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str ) -> Dict:
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
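# Note: the multi-GPU branch above expects the launcher to export, per
# process:
#   WORLD_SIZE, N_GPU_NODE, RANK, N_NODES, NODE_RANK
# plus a local_rank != -1. torchrun/torch.distributed.launch provide
# RANK/WORLD_SIZE, while N_GPU_NODE/N_NODES/NODE_RANK are conventions of
# this training setup.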
| 688 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[Any] = "layoutlmv3"
def __init__( self : Optional[Any] , _UpperCAmelCase : int=5_02_65 , _UpperCAmelCase : Union[str, Any]=7_68 , _UpperCAmelCase : str=12 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : List[str]=30_72 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : Optional[Any]=0 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : Optional[Any]=1_28 , _UpperCAmelCase : Tuple=1_28 , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : Dict=1_28 , _UpperCAmelCase : int=64 , _UpperCAmelCase : List[str]=2_56 , _UpperCAmelCase : int=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=2_24 , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
vocab_size=_UpperCAmelCase , hidden_size=_UpperCAmelCase , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , intermediate_size=_UpperCAmelCase , hidden_act=_UpperCAmelCase , hidden_dropout_prob=_UpperCAmelCase , attention_probs_dropout_prob=_UpperCAmelCase , max_position_embeddings=_UpperCAmelCase , type_vocab_size=_UpperCAmelCase , initializer_range=_UpperCAmelCase , layer_norm_eps=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = max_ad_position_embeddings
__lowercase = coordinate_size
__lowercase = shape_size
__lowercase = has_relative_attention_bias
__lowercase = rel_pos_bins
__lowercase = max_rel_pos
__lowercase = has_spatial_attention_bias
__lowercase = rel_ad_pos_bins
__lowercase = max_rel_ad_pos
__lowercase = text_embed
__lowercase = visual_embed
__lowercase = input_size
__lowercase = num_channels
__lowercase = patch_size
__lowercase = classifier_dropout
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[str] = version.parse("1.12" )
@property
def a__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def a__ ( self : Any ) -> float:
"""simple docstring"""
return 1e-5
@property
def a__ ( self : Dict ) -> int:
"""simple docstring"""
return 12
def a__ ( self : Tuple , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional["TensorType"] = None , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , 'apply_ocr' , _UpperCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase )
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__lowercase = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = dict(
processor(
_UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) )
return inputs
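# Illustrative sketch of a generate_dummy_inputs call, assuming this mirrors
# transformers' LayoutLMv3OnnxConfig (the wiring below is hypothetical):
#   onnx_config = LayoutLMv3OnnxConfig(LayoutLMv3Config(), task="question-answering")
#   inputs = onnx_config.generate_dummy_inputs(
#       processor, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
#   )
#   # -> dict with input_ids, attention_mask, bbox and pixel_values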
| 688 | 1 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : bytes , SCREAMING_SNAKE_CASE : int ) -> np.array:
__lowercase = F"""{sampling_rate}"""
__lowercase = '1'
__lowercase = 'f32le'
__lowercase = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(SCREAMING_SNAKE_CASE , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
__lowercase = ffmpeg_process.communicate(SCREAMING_SNAKE_CASE )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
__lowercase = output_stream[0]
    __lowercase = np.frombuffer(SCREAMING_SNAKE_CASE , np.float32 )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : str = "f32le" , ) -> List[Any]:
__lowercase = F"""{sampling_rate}"""
__lowercase = '1'
if format_for_conversion == "s16le":
__lowercase = 2
elif format_for_conversion == "f32le":
__lowercase = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
__lowercase = platform.system()
if system == "Linux":
__lowercase = 'alsa'
__lowercase = 'default'
elif system == "Darwin":
__lowercase = 'avfoundation'
__lowercase = ':0'
elif system == "Windows":
__lowercase = 'dshow'
__lowercase = 'default'
__lowercase = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
__lowercase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
__lowercase = _ffmpeg_stream(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for item in iterator:
yield item
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[Tuple[float, float], float]] = None , SCREAMING_SNAKE_CASE : str = "f32le" , ) -> Any:
if stream_chunk_s is not None:
__lowercase = stream_chunk_s
else:
__lowercase = chunk_length_s
__lowercase = ffmpeg_microphone(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , format_for_conversion=SCREAMING_SNAKE_CASE )
if format_for_conversion == "s16le":
        __lowercase = np.int16
__lowercase = 2
elif format_for_conversion == "f32le":
        __lowercase = np.float32
__lowercase = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
__lowercase = chunk_length_s / 6
__lowercase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(SCREAMING_SNAKE_CASE , (int, float) ):
__lowercase = [stride_length_s, stride_length_s]
__lowercase = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
__lowercase = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
__lowercase = datetime.datetime.now()
__lowercase = datetime.timedelta(seconds=SCREAMING_SNAKE_CASE )
for item in chunk_bytes_iter(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , stride=(stride_left, stride_right) , stream=SCREAMING_SNAKE_CASE ):
# Put everything back in numpy scale
__lowercase = np.frombuffer(item['raw'] , dtype=SCREAMING_SNAKE_CASE )
__lowercase = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
__lowercase = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple[int, int] , SCREAMING_SNAKE_CASE : bool = False ) -> int:
__lowercase = b''
__lowercase , __lowercase = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
__lowercase = 0
for raw in iterator:
acc += raw
if stream and len(SCREAMING_SNAKE_CASE ) < chunk_len:
__lowercase = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(SCREAMING_SNAKE_CASE ) >= chunk_len:
# We are flushing the accumulator
__lowercase = (_stride_left, stride_right)
__lowercase = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
__lowercase = False
yield item
__lowercase = stride_left
__lowercase = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(SCREAMING_SNAKE_CASE ) > stride_left:
__lowercase = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
__lowercase = False
yield item
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : int ) -> Any:
    __lowercase = 2**24  # 16 MB
try:
with subprocess.Popen(SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE , bufsize=SCREAMING_SNAKE_CASE ) as ffmpeg_process:
while True:
__lowercase = ffmpeg_process.stdout.read(SCREAMING_SNAKE_CASE )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
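# Illustrative sketch of consuming the live-microphone generator above (the
# function name follows the transformers helper this mirrors; a working
# ffmpeg install is assumed):
#   for chunk in ffmpeg_microphone_live(16_000, chunk_length_s=5.0, stream_chunk_s=1.0):
#       if not chunk.get("partial", False):
#           audio = chunk["raw"]           # np.float32 samples
#           left, right = chunk["stride"]  # edge samples to ignore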
| 688 |
from pathlib import Path

import numpy as np
from PIL import Image


def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
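# Note: rgb_to_gray uses the ITU-R BT.601 luma weights,
#   Y = 0.2989 R + 0.5870 G + 0.1140 B
# and dilation turns a pixel on whenever the structuring element overlaps
# any foreground pixel (summation > 0).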
| 688 | 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
SCREAMING_SNAKE_CASE__ = 16
SCREAMING_SNAKE_CASE__ = 32
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Accelerator , SCREAMING_SNAKE_CASE : int = 16 , SCREAMING_SNAKE_CASE : str = "bert-base-cased" ) -> Union[str, Any]:
__lowercase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
__lowercase = load_dataset('glue' , 'mrpc' )
def tokenize_function(SCREAMING_SNAKE_CASE : Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
__lowercase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowercase = datasets.map(
SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=SCREAMING_SNAKE_CASE )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowercase = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(SCREAMING_SNAKE_CASE : List[str] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(SCREAMING_SNAKE_CASE , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
__lowercase = DataLoader(
tokenized_datasets['train'] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
__lowercase = DataLoader(
tokenized_datasets['validation'] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int ) -> str:
model.eval()
__lowercase = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowercase = model(**SCREAMING_SNAKE_CASE )
__lowercase = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once than multiple times
__lowercase , __lowercase = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(SCREAMING_SNAKE_CASE ) - 1:
__lowercase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__lowercase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE , references=SCREAMING_SNAKE_CASE , )
__lowercase = metric.compute()
return eval_metric["accuracy"]
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str ) -> int:
# Initialize accelerator
__lowercase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowercase = config['lr']
__lowercase = int(config['num_epochs'] )
__lowercase = int(config['seed'] )
__lowercase = int(config['batch_size'] )
__lowercase = args.model_name_or_path
set_seed(SCREAMING_SNAKE_CASE )
__lowercase , __lowercase = get_dataloaders(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowercase = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE )
# Instantiate optimizer
__lowercase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__lowercase = optimizer_cls(params=model.parameters() , lr=SCREAMING_SNAKE_CASE )
if accelerator.state.deepspeed_plugin is not None:
__lowercase = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
__lowercase = 1
__lowercase = (len(SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__lowercase = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE , num_warmup_steps=0 , num_training_steps=SCREAMING_SNAKE_CASE , )
else:
__lowercase = DummyScheduler(SCREAMING_SNAKE_CASE , total_num_steps=SCREAMING_SNAKE_CASE , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase = accelerator.prepare(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# We need to keep track of how many total steps we have iterated over
__lowercase = 0
    # We also need to keep track of the starting epoch so files are named properly
__lowercase = 0
__lowercase = evaluate.load('glue' , 'mrpc' )
__lowercase = num_epochs
if args.partial_train_epoch is not None:
__lowercase = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__lowercase = args.resume_from_checkpoint.split('epoch_' )[1]
__lowercase = ''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__lowercase = int(SCREAMING_SNAKE_CASE ) + 1
__lowercase = evaluation_loop(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
accelerator.print('resumed checkpoint performance:' , SCREAMING_SNAKE_CASE )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:' , lr_scheduler.get_lr()[0] )
accelerator.print('resumed optimizers\'s lr:' , optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir , F"""state_{starting_epoch-1}.json""" ) , 'r' ) as f:
__lowercase = json.load(SCREAMING_SNAKE_CASE )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__lowercase = {}
for epoch in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
__lowercase = model(**SCREAMING_SNAKE_CASE )
__lowercase = outputs.loss
__lowercase = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__lowercase = F"""epoch_{epoch}"""
__lowercase = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
accelerator.save_state(SCREAMING_SNAKE_CASE )
__lowercase = evaluation_loop(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = accuracy
__lowercase = lr_scheduler.get_lr()[0]
__lowercase = optimizer.param_groups[0]['lr']
__lowercase = epoch
__lowercase = overall_step
accelerator.print(F"""epoch {epoch}:""" , SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F"""state_{epoch}.json""" ) , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
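# The training loop above follows the classic gradient-accumulation pattern
# (sketched here with illustrative names): each micro-batch loss is scaled by
# 1/N so that N accumulated backward passes average to one large-batch update,
# and the optimizer only steps every N micro-batches. Note the loop above
# steps on `step % N == 0`, which also fires at step 0; the common variant is
# `(step + 1) % N == 0`.
# for step, batch in enumerate(loader):
#     (model(**batch).loss / N).backward()
#     if (step + 1) % N == 0:
#         optimizer.step(); scheduler.step(); optimizer.zero_grad()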
def __SCREAMING_SNAKE_CASE ( ) -> List[str]:
    __lowercase = argparse.ArgumentParser(description='Simple example of a training script that saves and resumes from checkpoints.' )
parser.add_argument(
'--model_name_or_path' , type=SCREAMING_SNAKE_CASE , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=SCREAMING_SNAKE_CASE , )
parser.add_argument(
'--output_dir' , type=SCREAMING_SNAKE_CASE , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--partial_train_epoch' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='If passed, the training will stop after this number of epochs.' , )
parser.add_argument(
'--num_epochs' , type=SCREAMING_SNAKE_CASE , default=2 , help='Number of train epochs.' , )
__lowercase = parser.parse_args()
__lowercase = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 688 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[str] = ["pixel_values"]
def __init__( self : Tuple , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 2_55 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : bool = True , **_UpperCAmelCase : str , ) -> None:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
__lowercase = size if size is not None else {'height': 3_84, 'width': 3_84}
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__lowercase = do_resize
__lowercase = size
__lowercase = resample
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_normalize
__lowercase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__lowercase = image_std if image_std is not None else OPENAI_CLIP_STD
__lowercase = do_convert_rgb
def a__ ( self : int , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : int , ) -> np.ndarray:
"""simple docstring"""
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
__lowercase = (size['height'], size['width'])
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : Optional[int] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Any , ) -> str:
"""simple docstring"""
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[str] , ) -> np.ndarray:
"""simple docstring"""
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : int , _UpperCAmelCase : ImageInput , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Dict[str, int]] = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[float] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : int , ) -> PIL.Image.Image:
"""simple docstring"""
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = resample if resample is not None else self.resample
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = image_mean if image_mean is not None else self.image_mean
__lowercase = image_std if image_std is not None else self.image_std
__lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase = size if size is not None else self.size
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__lowercase = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase = [convert_to_rgb(_UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
__lowercase = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_rescale:
__lowercase = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
__lowercase = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
__lowercase = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
__lowercase = BatchFeature(data={'pixel_values': images} , tensor_type=_UpperCAmelCase )
return encoded_outputs
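# Illustrative usage of the processor class above (the argument values shown
# are the defaults declared in __init__, not tied to any particular
# checkpoint):
# processor = A__(size={"height": 384, "width": 384})
# batch = processor.preprocess(images=pil_image, return_tensors="pt")
# batch["pixel_values"].shape  ->  torch.Size([1, 3, 384, 384])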
| 688 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""microsoft/unispeech-sat-base-100h-libri-ft""": (
"""https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"""
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[Any] = "unispeech-sat"
def __init__( self : Union[str, Any] , _UpperCAmelCase : Tuple=32 , _UpperCAmelCase : Union[str, Any]=7_68 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : int=30_72 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : List[str]=0.0 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Optional[Any]=0.02 , _UpperCAmelCase : Optional[int]=1e-5 , _UpperCAmelCase : Dict="group" , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : List[str]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , _UpperCAmelCase : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , _UpperCAmelCase : Optional[Any]=(10, 3, 3, 3, 3, 2, 2) , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : List[str]=1_28 , _UpperCAmelCase : List[str]=16 , _UpperCAmelCase : str=False , _UpperCAmelCase : Any=True , _UpperCAmelCase : Dict=0.05 , _UpperCAmelCase : Tuple=10 , _UpperCAmelCase : Dict=2 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : Union[str, Any]=10 , _UpperCAmelCase : Union[str, Any]=0 , _UpperCAmelCase : Dict=3_20 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Optional[int]=1_00 , _UpperCAmelCase : List[Any]=2_56 , _UpperCAmelCase : Optional[int]=2_56 , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : Tuple="mean" , _UpperCAmelCase : Dict=False , _UpperCAmelCase : str=False , _UpperCAmelCase : int=2_56 , _UpperCAmelCase : Any=(5_12, 5_12, 5_12, 5_12, 15_00) , _UpperCAmelCase : Optional[Any]=(5, 3, 3, 1, 1) , _UpperCAmelCase : str=(1, 2, 3, 1, 1) , _UpperCAmelCase : Tuple=5_12 , _UpperCAmelCase : Tuple=0 , _UpperCAmelCase : Any=1 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Optional[int]=5_04 , **_UpperCAmelCase : int , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase )
__lowercase = hidden_size
__lowercase = feat_extract_norm
__lowercase = feat_extract_activation
__lowercase = list(_UpperCAmelCase )
__lowercase = list(_UpperCAmelCase )
__lowercase = list(_UpperCAmelCase )
__lowercase = conv_bias
__lowercase = num_conv_pos_embeddings
__lowercase = num_conv_pos_embedding_groups
__lowercase = len(self.conv_dim )
__lowercase = num_hidden_layers
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = num_attention_heads
__lowercase = hidden_dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = feat_proj_dropout
__lowercase = final_dropout
__lowercase = layerdrop
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = vocab_size
__lowercase = num_clusters
__lowercase = do_stable_layer_norm
__lowercase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowercase = apply_spec_augment
__lowercase = mask_time_prob
__lowercase = mask_time_length
__lowercase = mask_time_min_masks
__lowercase = mask_feature_prob
__lowercase = mask_feature_length
__lowercase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__lowercase = num_codevectors_per_group
__lowercase = num_codevector_groups
__lowercase = contrastive_logits_temperature
__lowercase = feat_quantizer_dropout
__lowercase = num_negatives
__lowercase = codevector_dim
__lowercase = proj_codevector_dim
__lowercase = diversity_loss_weight
# ctc loss
__lowercase = ctc_loss_reduction
__lowercase = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowercase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowercase = list(_UpperCAmelCase )
__lowercase = list(_UpperCAmelCase )
__lowercase = list(_UpperCAmelCase )
__lowercase = xvector_output_dim
@property
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
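# The property above multiplies the convolutional strides together, i.e. the
# feature extractor's total downsampling factor. With the default strides
# (5, 2, 2, 2, 2, 2, 2) each output frame covers 5 * 2**6 = 320 input samples,
# or 20 ms of 16 kHz audio:
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320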
| 688 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
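# Illustrative effect of the _LazyModule above: submodules are only imported on
# first attribute access, so e.g. the config class can be resolved without
# pulling in the torch-, tf- or flax-backed modeling files until they are
# actually needed.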
| 688 | 1 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE__ = logging.getLogger()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : list ) -> int:
__lowercase = '\n'.join(SCREAMING_SNAKE_CASE )
Path(SCREAMING_SNAKE_CASE ).open('w' ).writelines(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ = """patrickvonplaten/t5-tiny-random"""
SCREAMING_SNAKE_CASE__ = """sshleifer/bart-tiny-random"""
SCREAMING_SNAKE_CASE__ = """sshleifer/tiny-mbart"""
SCREAMING_SNAKE_CASE__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class A__ ( lowerCAmelCase__ ):
def a__ ( self : List[Any] , _UpperCAmelCase : str ) -> Dict:
"""simple docstring"""
__lowercase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
__lowercase = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
__lowercase = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
__lowercase = 'translation_en_to_de' if model == T5_TINY else 'summarization'
__lowercase = f"""
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
""".split()
with patch.object(_UpperCAmelCase , 'argv' , _UpperCAmelCase ):
run_generate()
assert Path(_UpperCAmelCase ).exists()
# os.remove(Path(output_file_name))
def a__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
self.run_eval_tester(_UpperCAmelCase )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def a__ ( self : Dict , _UpperCAmelCase : Dict ) -> Tuple:
"""simple docstring"""
self.run_eval_tester(_UpperCAmelCase )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def a__ ( self : List[Any] , _UpperCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
__lowercase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
__lowercase = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
__lowercase = {
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
'Maschinelles Lernen ist großartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
__lowercase = Path(self.get_auto_remove_tmp_dir() )
__lowercase = str(tmp_dir / 'scores.json' )
__lowercase = str(tmp_dir / 'val.target' )
_dump_articles(_UpperCAmelCase , text['en'] )
_dump_articles(_UpperCAmelCase , text['de'] )
__lowercase = 'translation_en_to_de' if model == T5_TINY else 'summarization'
__lowercase = f"""
run_eval_search.py
{model}
{str(_UpperCAmelCase )}
{str(_UpperCAmelCase )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
""".split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
with patch.object(_UpperCAmelCase , 'argv' , _UpperCAmelCase ):
with CaptureStdout() as cs:
run_search()
__lowercase = [' num_beams | length_penalty', model, 'Best score args']
__lowercase = ['Info']
if "translation" in task:
expected_strings.append('bleu' )
else:
expected_strings.extend(_UpperCAmelCase )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(_UpperCAmelCase ).exists()
os.remove(Path(_UpperCAmelCase ) )
| 688 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict ) -> Any:
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
__lowercase = TapasConfig.from_json_file(SCREAMING_SNAKE_CASE )
# set absolute/relative position embeddings parameter
__lowercase = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "WTQ":
# run_task_main.py hparams
__lowercase = 4
__lowercase = True
# hparam_utils.py hparams
__lowercase = 0.664_694
__lowercase = 0.207_951
__lowercase = 0.121_194
__lowercase = True
__lowercase = True
__lowercase = False
__lowercase = 0.0_352_513
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
__lowercase = 4
__lowercase = False
# hparam_utils.py hparams
__lowercase = 36.4_519
__lowercase = 0.903_421
__lowercase = 222.088
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = 0.763_141
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "TABFACT":
__lowercase = TapasForSequenceClassification(config=SCREAMING_SNAKE_CASE )
elif task == "MLM":
__lowercase = TapasForMaskedLM(config=SCREAMING_SNAKE_CASE )
elif task == "INTERMEDIATE_PRETRAINING":
__lowercase = TapasModel(config=SCREAMING_SNAKE_CASE )
else:
raise ValueError(F"""Task {task} not supported.""" )
print(F"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model (weights and configuration)
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
# Save tokenizer files
print(F"""Save tokenizer files to {pytorch_dump_path}""" )
__lowercase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt' , model_max_length=512 )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
print('Used relative position embeddings:' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 688 | 1 |
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int = 1000 ) -> int:
__lowercase = 2**power
__lowercase = 0
while n:
__lowercase , __lowercase = r + n % 10, n // 10
return r
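# Worked example: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, so solution(15) == 26.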
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 688 |
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> int:
return int((input_a, input_a).count(1 ) != 0 )
def __SCREAMING_SNAKE_CASE ( ) -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 688 | 1 |
from __future__ import annotations
from math import ceil, floor, sqrt
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int = 2000000 ) -> int:
__lowercase = [0]
__lowercase = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
__lowercase = 0
# the area corresponding to the grid that gives the product closest to target
__lowercase = 0
# an estimate of b, using the quadratic formula
__lowercase = 42
# the largest integer less than b_estimate
__lowercase = 42
    # the smallest integer greater than or equal to b_estimate
__lowercase = 42
# the triangle number corresponding to b_floor
__lowercase = 42
# the triangle number corresponding to b_ceil
__lowercase = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
__lowercase = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
__lowercase = floor(SCREAMING_SNAKE_CASE )
__lowercase = ceil(SCREAMING_SNAKE_CASE )
__lowercase = triangle_numbers[b_floor]
__lowercase = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
__lowercase = triangle_b_first_guess * triangle_a
__lowercase = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
__lowercase = triangle_b_second_guess * triangle_a
__lowercase = idx_a * b_ceil
return area
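# Derivation of the `b_estimate` used in the loop above: with triangle numbers
# T(n) = n * (n + 1) / 2, solving T(b) * T(a) = target for b gives
# b**2 + b - 2 * target / T(a) = 0, hence
# b = (-1 + sqrt(1 + 8 * target / T(a))) / 2.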
if __name__ == "__main__":
print(F'''{solution() = }''')
| 688 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = """Hello, World!"""
SCREAMING_SNAKE_CASE__ = """en_XX"""
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : bool ) -> Optional[int]:
__lowercase = Path('data_bin' )
__lowercase = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(SCREAMING_SNAKE_CASE ).parent ) , checkpoint_file=Path(SCREAMING_SNAKE_CASE ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(SCREAMING_SNAKE_CASE ) , bpe='sentencepiece' , sentencepiece_model=str(Path(SCREAMING_SNAKE_CASE ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
xmod.eval() # disable dropout
print(SCREAMING_SNAKE_CASE )
__lowercase = xmod.model.encoder.sentence_encoder
__lowercase = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our X-MOD config:' , SCREAMING_SNAKE_CASE )
__lowercase = XmodForSequenceClassification(SCREAMING_SNAKE_CASE ) if classification_head else XmodForMaskedLM(SCREAMING_SNAKE_CASE )
model.eval()
# Now let's copy all the weights.
# Embeddings
__lowercase = xmod_sent_encoder.embed_tokens.weight
__lowercase = xmod_sent_encoder.embed_positions.weight
__lowercase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__lowercase = xmod_sent_encoder.layernorm_embedding.weight
__lowercase = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowercase = model.roberta.encoder.layer[i]
__lowercase = xmod_sent_encoder.layers[i]
# self attention
__lowercase = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
__lowercase = xmod_layer.self_attn.q_proj.weight
__lowercase = xmod_layer.self_attn.q_proj.bias
__lowercase = xmod_layer.self_attn.k_proj.weight
__lowercase = xmod_layer.self_attn.k_proj.bias
__lowercase = xmod_layer.self_attn.v_proj.weight
__lowercase = xmod_layer.self_attn.v_proj.bias
# self-attention output
__lowercase = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
__lowercase = xmod_layer.self_attn.out_proj.weight
__lowercase = xmod_layer.self_attn.out_proj.bias
__lowercase = xmod_layer.self_attn_layer_norm.weight
__lowercase = xmod_layer.self_attn_layer_norm.bias
# intermediate
__lowercase = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.' )
__lowercase = xmod_layer.fca.weight
__lowercase = xmod_layer.fca.bias
# output
__lowercase = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.' )
__lowercase = xmod_layer.fca.weight
__lowercase = xmod_layer.fca.bias
__lowercase = xmod_layer.final_layer_norm.weight
__lowercase = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__lowercase = xmod_layer.adapter_layer_norm.weight
__lowercase = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__lowercase = bert_output.adapter_modules[lang_code]
__lowercase = xmod_layer.adapter_modules[lang_code]
__lowercase = from_adapter.fca.weight
__lowercase = from_adapter.fca.bias
__lowercase = from_adapter.fca.weight
__lowercase = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__lowercase = xmod_sent_encoder.layer_norm.weight
__lowercase = xmod_sent_encoder.layer_norm.bias
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'].dense.weight
__lowercase = xmod.model.classification_heads['mnli'].dense.bias
__lowercase = xmod.model.classification_heads['mnli'].out_proj.weight
__lowercase = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
__lowercase = xmod.model.encoder.lm_head.dense.weight
__lowercase = xmod.model.encoder.lm_head.dense.bias
__lowercase = xmod.model.encoder.lm_head.layer_norm.weight
__lowercase = xmod.model.encoder.lm_head.layer_norm.bias
__lowercase = xmod.model.encoder.lm_head.weight
__lowercase = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__lowercase = xmod.encode(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(SCREAMING_SNAKE_CASE )
__lowercase = model(SCREAMING_SNAKE_CASE )[0]
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'](xmod.extract_features(SCREAMING_SNAKE_CASE ) )
else:
__lowercase = xmod.model(SCREAMING_SNAKE_CASE , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
__lowercase = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
__lowercase = torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
Path(SCREAMING_SNAKE_CASE ).mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 688 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
SCREAMING_SNAKE_CASE__ = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
SCREAMING_SNAKE_CASE__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str ) -> Dict:
with open(SCREAMING_SNAKE_CASE , 'rb' ) as f:
__lowercase = Image.open(SCREAMING_SNAKE_CASE )
return im.convert('RGB' )
@dataclass
class A__ :
lowerCAmelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
"help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
} , )
lowerCAmelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowerCAmelCase__ : Optional[str] = field(default=lowerCAmelCase__ , metadata={"help": "A folder containing the training data."} )
lowerCAmelCase__ : Optional[str] = field(default=lowerCAmelCase__ , metadata={"help": "A folder containing the validation data."} )
lowerCAmelCase__ : Optional[float] = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
lowerCAmelCase__ : Optional[int] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCAmelCase__ : Optional[int] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'You must specify either a dataset name from the hub or a train and/or validation directory.' )
@dataclass
class A__ :
lowerCAmelCase__ : str = field(
default="google/vit-base-patch16-224-in21k" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
lowerCAmelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowerCAmelCase__ )} , )
lowerCAmelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCAmelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
lowerCAmelCase__ : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCAmelCase__ : str = field(default=lowerCAmelCase__ , metadata={"help": "Name or path of preprocessor config."} )
lowerCAmelCase__ : bool = field(
default=lowerCAmelCase__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
lowerCAmelCase__ : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
__lowercase = torch.stack([example['pixel_values'] for example in examples] )
__lowercase = torch.tensor([example['labels'] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowercase , __lowercase , __lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_image_classification' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__lowercase = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__lowercase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowercase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
__lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='image-classification' , use_auth_token=True if model_args.use_auth_token else None , )
else:
__lowercase = {}
if data_args.train_dir is not None:
__lowercase = os.path.join(data_args.train_dir , '**' )
if data_args.validation_dir is not None:
__lowercase = os.path.join(data_args.validation_dir , '**' )
__lowercase = load_dataset(
'imagefolder' , data_files=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , task='image-classification' , )
# If we don't have a validation split, split off a percentage of train as validation.
__lowercase = None if 'validation' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , SCREAMING_SNAKE_CASE ) and data_args.train_val_split > 0.0:
__lowercase = dataset['train'].train_test_split(data_args.train_val_split )
__lowercase = split['train']
__lowercase = split['test']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
__lowercase = dataset['train'].features['labels'].names
__lowercase , __lowercase = {}, {}
for i, label in enumerate(SCREAMING_SNAKE_CASE ):
__lowercase = str(SCREAMING_SNAKE_CASE )
__lowercase = label
# Load the accuracy metric from the datasets package
__lowercase = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(SCREAMING_SNAKE_CASE : str ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
__lowercase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(SCREAMING_SNAKE_CASE ) , labelaid=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , finetuning_task='image-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__lowercase = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
__lowercase = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
__lowercase = image_processor.size['shortest_edge']
else:
__lowercase = (image_processor.size['height'], image_processor.size['width'])
__lowercase = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
__lowercase = Compose(
[
RandomResizedCrop(SCREAMING_SNAKE_CASE ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
__lowercase = Compose(
[
Resize(SCREAMING_SNAKE_CASE ),
CenterCrop(SCREAMING_SNAKE_CASE ),
ToTensor(),
normalize,
] )
def train_transforms(SCREAMING_SNAKE_CASE : str ):
__lowercase = [
_train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']
]
return example_batch
def val_transforms(SCREAMING_SNAKE_CASE : List[str] ):
__lowercase = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
__lowercase = (
dataset['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(SCREAMING_SNAKE_CASE )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
__lowercase = (
dataset['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(SCREAMING_SNAKE_CASE )
    # Initialize our trainer
__lowercase = Trainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , train_dataset=dataset['train'] if training_args.do_train else None , eval_dataset=dataset['validation'] if training_args.do_eval else None , compute_metrics=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
__lowercase = None
if training_args.resume_from_checkpoint is not None:
__lowercase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowercase = last_checkpoint
__lowercase = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__lowercase = trainer.evaluate()
trainer.log_metrics('eval' , SCREAMING_SNAKE_CASE )
trainer.save_metrics('eval' , SCREAMING_SNAKE_CASE )
# Write model card and (optionally) push to hub
__lowercase = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'image-classification',
'dataset': data_args.dataset_name,
'tags': ['image-classification', 'vision'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 688 |
from __future__ import annotations
import math
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int ) -> float:
__lowercase = u
for i in range(1 , SCREAMING_SNAKE_CASE ):
__lowercase = temp * (u - i)
return temp
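# The helper above computes the falling product u * (u - 1) * ... * (u - p + 1)
# that appears in each term of Newton's forward-difference formula,
# f(x) ≈ y0 + u Δy0 + u(u-1)/2! Δ²y0 + ...
# For example, with u = 1.5 and p = 3 it returns 1.5 * 0.5 * (-0.5) = -0.375.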
def __SCREAMING_SNAKE_CASE ( ) -> None:
    __lowercase = int(input('enter the number of values: ' ) )
__lowercase = []
for _ in range(SCREAMING_SNAKE_CASE ):
y.append([] )
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE ):
y[i].append(SCREAMING_SNAKE_CASE )
__lowercase = 0
print('enter the values of parameters in a list: ' )
__lowercase = list(map(SCREAMING_SNAKE_CASE , input().split() ) )
print('enter the values of corresponding parameters: ' )
for i in range(SCREAMING_SNAKE_CASE ):
__lowercase = float(input() )
__lowercase = int(input('enter the value to interpolate: ' ) )
__lowercase = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , SCREAMING_SNAKE_CASE ):
for j in range(n - i ):
__lowercase = y[j + 1][i - 1] - y[j][i - 1]
__lowercase = y[0][0]
for i in range(1 , SCREAMING_SNAKE_CASE ):
summ += (ucal(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) * y[0][i]) / math.factorial(SCREAMING_SNAKE_CASE )
print(F"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
| 688 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
SCREAMING_SNAKE_CASE__ = {
"""distilbert-base-uncased""": 512,
"""distilbert-base-uncased-distilled-squad""": 512,
"""distilbert-base-cased""": 512,
"""distilbert-base-cased-distilled-squad""": 512,
"""distilbert-base-german-cased""": 512,
"""distilbert-base-multilingual-cased""": 512,
}
SCREAMING_SNAKE_CASE__ = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Dict = VOCAB_FILES_NAMES
lowerCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ : Optional[int] = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ : Tuple = ["input_ids", "attention_mask"]
lowerCAmelCase__ : Optional[int] = DistilBertTokenizer
def __init__( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : Any=None , _UpperCAmelCase : Any=True , _UpperCAmelCase : Tuple="[UNK]" , _UpperCAmelCase : str="[SEP]" , _UpperCAmelCase : str="[PAD]" , _UpperCAmelCase : List[str]="[CLS]" , _UpperCAmelCase : Union[str, Any]="[MASK]" , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Optional[Any]=None , **_UpperCAmelCase : Dict , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenize_chinese_chars=_UpperCAmelCase , strip_accents=_UpperCAmelCase , **_UpperCAmelCase , )
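        # rebuild the backend normalizer if its serialized state disagrees with the options requested here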
__lowercase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _UpperCAmelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _UpperCAmelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _UpperCAmelCase ) != tokenize_chinese_chars
):
__lowercase = getattr(_UpperCAmelCase , normalizer_state.pop('type' ) )
__lowercase = do_lower_case
__lowercase = strip_accents
__lowercase = tokenize_chinese_chars
__lowercase = normalizer_class(**_UpperCAmelCase )
__lowercase = do_lower_case
def a__ ( self : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict=None ) -> List[str]:
"""simple docstring"""
__lowercase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a__ ( self : str , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__ ( self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
__lowercase = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
| 688 |
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> int:
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowercase = F"""Input value of [number={number}] must be an integer"""
raise TypeError(SCREAMING_SNAKE_CASE )
if number < 1:
__lowercase = F"""Input value of [number={number}] must be > 0"""
raise ValueError(SCREAMING_SNAKE_CASE )
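    # C(0) = 1 and C(i) = C(i - 1) * (4 * i - 2) // (i + 1); the loop therefore yields the (number - 1)-th Catalan number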
__lowercase = 1
for i in range(1 , SCREAMING_SNAKE_CASE ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 688 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[Any] = "roformer"
def __init__( self : str , _UpperCAmelCase : List[Any]=5_00_00 , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : List[str]=7_68 , _UpperCAmelCase : str=12 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : List[Any]=30_72 , _UpperCAmelCase : List[Any]="gelu" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Dict=15_36 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : List[str]=0.02 , _UpperCAmelCase : Tuple=1e-1_2 , _UpperCAmelCase : Union[str, Any]=0 , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : Union[str, Any]=True , **_UpperCAmelCase : Optional[int] , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__lowercase = vocab_size
__lowercase = hidden_size if embedding_size is None else embedding_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = rotary_value
__lowercase = use_cache
class A__ ( lowerCAmelCase__ ):
@property
def a__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
__lowercase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__lowercase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 688 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def __SCREAMING_SNAKE_CASE ( ) -> List[str]:
__lowercase = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
__lowercase = parser.add_subparsers(help='diffusers-cli command helpers' )
# Register commands
EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE )
# Let's go
__lowercase = parser.parse_args()
if not hasattr(SCREAMING_SNAKE_CASE , 'func' ):
parser.print_help()
exit(1 )
# Run
__lowercase = args.func(SCREAMING_SNAKE_CASE )
service.run()
if __name__ == "__main__":
main()
| 688 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class A__ ( unittest.TestCase ):
def a__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = 0
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32' )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase = Path(_UpperCAmelCase ) / 'preprocessor_config.json'
__lowercase = Path(_UpperCAmelCase ) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_UpperCAmelCase , 'w' ) , )
json.dump({'model_type': 'clip'} , open(_UpperCAmelCase , 'w' ) )
__lowercase = AutoImageProcessor.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase = Path(_UpperCAmelCase ) / 'preprocessor_config.json'
__lowercase = Path(_UpperCAmelCase ) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(_UpperCAmelCase , 'w' ) , )
json.dump({'model_type': 'clip'} , open(_UpperCAmelCase , 'w' ) )
__lowercase = AutoImageProcessor.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase = CLIPConfig()
            # Create a dummy config file with image_processor_type
__lowercase = Path(_UpperCAmelCase ) / 'preprocessor_config.json'
__lowercase = Path(_UpperCAmelCase ) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_UpperCAmelCase , 'w' ) , )
json.dump({'model_type': 'clip'} , open(_UpperCAmelCase , 'w' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__lowercase = AutoImageProcessor.from_pretrained(_UpperCAmelCase ).to_dict()
config_dict.pop('image_processor_type' )
__lowercase = CLIPImageProcessor(**_UpperCAmelCase )
# save in new folder
model_config.save_pretrained(_UpperCAmelCase )
config.save_pretrained(_UpperCAmelCase )
__lowercase = AutoImageProcessor.from_pretrained(_UpperCAmelCase )
# make sure private variable is not incorrectly saved
__lowercase = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase = Path(_UpperCAmelCase ) / 'preprocessor_config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_UpperCAmelCase , 'w' ) , )
__lowercase = AutoImageProcessor.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
with self.assertRaisesRegex(
_UpperCAmelCase , 'clip-base is not a local folder and is not a valid model identifier' ):
__lowercase = AutoImageProcessor.from_pretrained('clip-base' )
def a__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
with self.assertRaisesRegex(
_UpperCAmelCase , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
__lowercase = AutoImageProcessor.from_pretrained(_UpperCAmelCase , revision='aaaaaa' )
def a__ ( self : int ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(
_UpperCAmelCase , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
__lowercase = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model' )
def a__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ):
__lowercase = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_UpperCAmelCase ):
__lowercase = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_UpperCAmelCase )
__lowercase = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_UpperCAmelCase )
__lowercase = AutoImageProcessor.from_pretrained(_UpperCAmelCase , trust_remote_code=_UpperCAmelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , 'NewImageProcessor' )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
try:
AutoConfig.register('custom' , _UpperCAmelCase )
AutoImageProcessor.register(_UpperCAmelCase , _UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCAmelCase ):
AutoImageProcessor.register(_UpperCAmelCase , _UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase = Path(_UpperCAmelCase ) / 'preprocessor_config.json'
__lowercase = Path(_UpperCAmelCase ) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(_UpperCAmelCase , 'w' ) , )
json.dump({'model_type': 'clip'} , open(_UpperCAmelCase , 'w' ) )
__lowercase = CustomImageProcessor.from_pretrained(_UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_UpperCAmelCase )
__lowercase = AutoImageProcessor.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def a__ ( self : Optional[int] ) -> int:
"""simple docstring"""
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Dict = True
try:
AutoConfig.register('custom' , _UpperCAmelCase )
AutoImageProcessor.register(_UpperCAmelCase , _UpperCAmelCase )
# If remote code is not set, the default is to use local
__lowercase = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
__lowercase = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
__lowercase = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(not hasattr(_UpperCAmelCase , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 688 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : List[str] = ProphetNetTokenizer
lowerCAmelCase__ : str = False
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
super().setUp()
__lowercase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def a__ ( self : str , _UpperCAmelCase : Any ) -> List[str]:
"""simple docstring"""
__lowercase = 'UNwant\u00E9d,running'
__lowercase = 'unwanted, running'
return input_text, output_text
def a__ ( self : Any ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [9, 6, 7, 12, 10, 11] )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Dict ) -> str:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : str ) -> Dict:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def a__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__lowercase = {}
for i, token in enumerate(_UpperCAmelCase ):
__lowercase = i
__lowercase = WordpieceTokenizer(vocab=_UpperCAmelCase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def a__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__lowercase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
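        # reference input ids for the first sentence; 102 is the [SEP] id the tokenizer appends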
__lowercase = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
__lowercase = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='pt' )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def a__ ( self : int ) -> Dict:
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def a__ ( self : Any ) -> List[str]:
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__lowercase = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCAmelCase )
__lowercase = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
assert encoded_sentence == text + [1_02]
assert encoded_pair == text + [1_02] + text_a + [1_02]
| 688 | 1 |
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=lowerCAmelCase__ ):
lowerCAmelCase__ : Tuple = ["transformers", "torch", "note_seq"]
def __init__( self : Union[str, Any] , *_UpperCAmelCase : Any , **_UpperCAmelCase : Union[str, Any] ) -> str:
"""simple docstring"""
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def a__ ( cls : Dict , *_UpperCAmelCase : Any , **_UpperCAmelCase : int ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def a__ ( cls : int , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
| 688 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
SCREAMING_SNAKE_CASE__ = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""jukebox""": 512,
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
lowerCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ : Optional[Any] = PRETRAINED_LYRIC_TOKENS_SIZES
lowerCAmelCase__ : Any = ["input_ids", "attention_mask"]
def __init__( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=["v3", "v2", "v2"] , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Union[str, Any]="<|endoftext|>" , **_UpperCAmelCase : Tuple , ) -> List[Any]:
"""simple docstring"""
__lowercase = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
super().__init__(
unk_token=_UpperCAmelCase , n_genres=_UpperCAmelCase , version=_UpperCAmelCase , max_n_lyric_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = version
__lowercase = max_n_lyric_tokens
__lowercase = n_genres
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
__lowercase = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
        # In v2 the character vocabulary had n_vocab=80; v3 accidentally dropped '+', leaving n_vocab=79.
if len(self.lyrics_encoder ) == 79:
__lowercase = oov.replace(R'\-\'' , R'\-+\'' )
__lowercase = regex.compile(_UpperCAmelCase )
__lowercase = {v: k for k, v in self.artists_encoder.items()}
__lowercase = {v: k for k, v in self.genres_encoder.items()}
__lowercase = {v: k for k, v in self.lyrics_encoder.items()}
@property
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = [self.artists_encoder.get(_UpperCAmelCase , 0 ) for artist in list_artists]
for genres in range(len(_UpperCAmelCase ) ):
__lowercase = [self.genres_encoder.get(_UpperCAmelCase , 0 ) for genre in list_genres[genres]]
__lowercase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
__lowercase = [[self.lyrics_encoder.get(_UpperCAmelCase , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def a__ ( self : str , _UpperCAmelCase : str ) -> Tuple:
"""simple docstring"""
return list(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase , __lowercase = self.prepare_for_tokenization(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = self._tokenize(_UpperCAmelCase )
return artist, genre, lyrics
def a__ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : bool = False ) -> Tuple[str, str, str, Dict[str, Any]]:
"""simple docstring"""
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
__lowercase = artists[idx].lower()
__lowercase = [genres[idx].lower()]
else:
__lowercase = self._normalize(artists[idx] ) + '.v2'
__lowercase = [
self._normalize(_UpperCAmelCase ) + '.v2' for genre in genres[idx].split('_' )
            ] # 'split' handles dictionary entries that combine several genres with '_'
if self.version[0] == "v2":
__lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
__lowercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
__lowercase = {vocab[index]: index + 1 for index in range(len(_UpperCAmelCase ) )}
__lowercase = 0
__lowercase = len(_UpperCAmelCase ) + 1
__lowercase = self.vocab
__lowercase = {v: k for k, v in self.vocab.items()}
__lowercase = ''
else:
__lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
__lowercase = self._run_strip_accents(_UpperCAmelCase )
__lowercase = lyrics.replace('\\' , '\n' )
__lowercase = self.out_of_vocab.sub('' , _UpperCAmelCase ), [], []
return artists, genres, lyrics
def a__ ( self : Tuple , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = unicodedata.normalize('NFD' , _UpperCAmelCase )
__lowercase = []
for char in text:
__lowercase = unicodedata.category(_UpperCAmelCase )
if cat == "Mn":
continue
output.append(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : str ) -> str:
"""simple docstring"""
__lowercase = (
[chr(_UpperCAmelCase ) for i in range(ord('a' ) , ord('z' ) + 1 )]
+ [chr(_UpperCAmelCase ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
+ [chr(_UpperCAmelCase ) for i in range(ord('0' ) , ord('9' ) + 1 )]
+ ['.']
)
__lowercase = frozenset(_UpperCAmelCase )
__lowercase = re.compile(R'_+' )
__lowercase = ''.join([c if c in accepted else '_' for c in text.lower()] )
__lowercase = pattern.sub('_' , _UpperCAmelCase ).strip('_' )
return text
def a__ ( self : List[str] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
return " ".join(_UpperCAmelCase )
def a__ ( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = False ) -> int:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = TensorType(_UpperCAmelCase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
__lowercase = tf.constant
__lowercase = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
__lowercase = torch.tensor
__lowercase = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
__lowercase = jnp.array
__lowercase = _is_jax
else:
__lowercase = np.asarray
__lowercase = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
__lowercase = [inputs]
if not is_tensor(_UpperCAmelCase ):
__lowercase = as_tensor(_UpperCAmelCase )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : int="" , _UpperCAmelCase : Tuple="pt" ) -> BatchEncoding:
"""simple docstring"""
__lowercase = [0, 0, 0]
__lowercase = [artist] * len(self.version )
__lowercase = [genres] * len(self.version )
__lowercase , __lowercase , __lowercase = self.tokenize(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase , __lowercase , __lowercase = self._convert_token_to_id(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = [-INFINITY] * len(full_tokens[-1] )
__lowercase = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=_UpperCAmelCase )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def a__ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=_UpperCAmelCase ) )
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=_UpperCAmelCase ) )
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=_UpperCAmelCase ) )
return (artists_file, genres_file, lyrics_file)
def a__ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.artists_decoder.get(_UpperCAmelCase )
__lowercase = [self.genres_decoder.get(_UpperCAmelCase ) for genre in genres_index]
__lowercase = [self.lyrics_decoder.get(_UpperCAmelCase ) for character in lyric_index]
return artist, genres, lyrics
| 688 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 688 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=13 , _UpperCAmelCase : Any=7 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : List[Any]=36 , _UpperCAmelCase : Optional[Any]=6 , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : Any=6 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Dict=5_12 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Any=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = embedding_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_hidden_groups
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = AlbertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ) -> Tuple:
"""simple docstring"""
__lowercase = AlbertForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , sentence_order_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def a__ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AlbertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = AlbertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = AlbertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
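        # tile every input along a new num_choices axis: (batch_size, num_choices, seq_length)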
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : int = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : Dict = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : Optional[Any] = True
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : int=False ) -> Tuple:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase )
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = AlbertModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
@slow
def a__ ( self : int ) -> Any:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AlbertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = AlbertModel.from_pretrained('albert-base-v2' )
__lowercase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
__lowercase = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , _UpperCAmelCase )
__lowercase = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) )
| 688 | 1 |
import os
SCREAMING_SNAKE_CASE__ = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1000}
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str ) -> int:
__lowercase = 0
__lowercase = 0
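    # subtractive notation: a symbol standing before a larger one is subtracted (e.g. IV = 4)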
while index < len(SCREAMING_SNAKE_CASE ) - 1:
__lowercase = SYMBOLS[numerals[index]]
__lowercase = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> str:
__lowercase = ''
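    # greedily emit the minimal (canonical) numeral from thousands down to units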
__lowercase = num // 1000
numerals += m_count * "M"
num %= 1000
__lowercase = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
__lowercase = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str = "/p089_roman.txt" ) -> int:
__lowercase = 0
with open(os.path.dirname(SCREAMING_SNAKE_CASE ) + roman_numerals_filename ) as filea:
__lowercase = filea.readlines()
for line in lines:
__lowercase = line.strip()
__lowercase = parse_roman_numerals(SCREAMING_SNAKE_CASE )
__lowercase = generate_roman_numerals(SCREAMING_SNAKE_CASE )
savings += len(SCREAMING_SNAKE_CASE ) - len(SCREAMING_SNAKE_CASE )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 688 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class A__ ( lowerCAmelCase__ ):
def __init__( self : List[str] , _UpperCAmelCase : str = "▁" , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[str, AddedToken] = "<unk>" , _UpperCAmelCase : Union[str, AddedToken] = "</s>" , _UpperCAmelCase : Union[str, AddedToken] = "<pad>" , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
__lowercase = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
__lowercase = token_dict['token']
__lowercase = Tokenizer(Unigram() )
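        # normalization: NMT cleanup, NFKC, collapse runs of spaces, lowercase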
__lowercase = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}' ) , ' ' ),
normalizers.Lowercase(),
] )
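        # pre-tokenization: metaspace word marking, per-digit splitting, punctuation isolation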
__lowercase = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ),
pre_tokenizers.Digits(individual_digits=_UpperCAmelCase ),
pre_tokenizers.Punctuation(),
] )
__lowercase = decoders.Metaspace(replacement=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__lowercase = TemplateProcessing(
single=f"""$A {self.special_tokens["eos"]["token"]}""" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
__lowercase = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : Union[str, List[str]] , _UpperCAmelCase : int = 80_00 , _UpperCAmelCase : bool = True , ) -> str:
"""simple docstring"""
__lowercase = trainers.UnigramTrainer(
vocab_size=_UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=_UpperCAmelCase , )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [files]
self._tokenizer.train(_UpperCAmelCase , trainer=_UpperCAmelCase )
self.add_unk_id()
def a__ ( self : int , _UpperCAmelCase : Union[Iterator[str], Iterator[Iterator[str]]] , _UpperCAmelCase : int = 80_00 , _UpperCAmelCase : bool = True , ) -> Dict:
"""simple docstring"""
__lowercase = trainers.UnigramTrainer(
vocab_size=_UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=_UpperCAmelCase , )
self._tokenizer.train_from_iterator(_UpperCAmelCase , trainer=_UpperCAmelCase )
self.add_unk_id()
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = json.loads(self._tokenizer.to_str() )
__lowercase = self.special_tokens['unk']['id']
__lowercase = Tokenizer.from_str(json.dumps(_UpperCAmelCase ) )
| 688 | 1 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = """Hello, World!"""
SCREAMING_SNAKE_CASE__ = """en_XX"""
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : bool ) -> Optional[int]:
__lowercase = Path('data_bin' )
__lowercase = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(SCREAMING_SNAKE_CASE ).parent ) , checkpoint_file=Path(SCREAMING_SNAKE_CASE ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(SCREAMING_SNAKE_CASE ) , bpe='sentencepiece' , sentencepiece_model=str(Path(SCREAMING_SNAKE_CASE ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
xmod.eval() # disable dropout
print(SCREAMING_SNAKE_CASE )
__lowercase = xmod.model.encoder.sentence_encoder
__lowercase = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our X-MOD config:' , SCREAMING_SNAKE_CASE )
__lowercase = XmodForSequenceClassification(SCREAMING_SNAKE_CASE ) if classification_head else XmodForMaskedLM(SCREAMING_SNAKE_CASE )
model.eval()
# Now let's copy all the weights.
# Embeddings
__lowercase = xmod_sent_encoder.embed_tokens.weight
__lowercase = xmod_sent_encoder.embed_positions.weight
__lowercase = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out, because X-MOD doesn't use token type embeddings.
__lowercase = xmod_sent_encoder.layernorm_embedding.weight
__lowercase = xmod_sent_encoder.layernorm_embedding.bias
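    # copy every encoder layer, verifying tensor shapes along the way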
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowercase = model.roberta.encoder.layer[i]
__lowercase = xmod_sent_encoder.layers[i]
# self attention
__lowercase = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
__lowercase = xmod_layer.self_attn.q_proj.weight
__lowercase = xmod_layer.self_attn.q_proj.bias
__lowercase = xmod_layer.self_attn.k_proj.weight
__lowercase = xmod_layer.self_attn.k_proj.bias
__lowercase = xmod_layer.self_attn.v_proj.weight
__lowercase = xmod_layer.self_attn.v_proj.bias
# self-attention output
__lowercase = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
__lowercase = xmod_layer.self_attn.out_proj.weight
__lowercase = xmod_layer.self_attn.out_proj.bias
__lowercase = xmod_layer.self_attn_layer_norm.weight
__lowercase = xmod_layer.self_attn_layer_norm.bias
# intermediate
__lowercase = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError('Dimensions of intermediate weights do not match.' )
        __lowercase = xmod_layer.fc1.weight
        __lowercase = xmod_layer.fc1.bias
# output
__lowercase = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError('Dimensions of feed-forward weights do not match.' )
        __lowercase = xmod_layer.fc2.weight
        __lowercase = xmod_layer.fc2.bias
__lowercase = xmod_layer.final_layer_norm.weight
__lowercase = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__lowercase = xmod_layer.adapter_layer_norm.weight
__lowercase = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__lowercase = bert_output.adapter_modules[lang_code]
__lowercase = xmod_layer.adapter_modules[lang_code]
            __lowercase = from_adapter.fc1.weight
            __lowercase = from_adapter.fc1.bias
            __lowercase = from_adapter.fc2.weight
            __lowercase = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__lowercase = xmod_sent_encoder.layer_norm.weight
__lowercase = xmod_sent_encoder.layer_norm.bias
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'].dense.weight
__lowercase = xmod.model.classification_heads['mnli'].dense.bias
__lowercase = xmod.model.classification_heads['mnli'].out_proj.weight
__lowercase = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
__lowercase = xmod.model.encoder.lm_head.dense.weight
__lowercase = xmod.model.encoder.lm_head.dense.bias
__lowercase = xmod.model.encoder.lm_head.layer_norm.weight
__lowercase = xmod.model.encoder.lm_head.layer_norm.bias
__lowercase = xmod.model.encoder.lm_head.weight
__lowercase = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__lowercase = xmod.encode(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(SCREAMING_SNAKE_CASE )
__lowercase = model(SCREAMING_SNAKE_CASE )[0]
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'](xmod.extract_features(SCREAMING_SNAKE_CASE ) )
else:
__lowercase = xmod.model(SCREAMING_SNAKE_CASE , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
__lowercase = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
__lowercase = torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
Path(SCREAMING_SNAKE_CASE ).mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
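# Example invocation (script name and paths are illustrative, not from the source):
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path ./checkpoints/model.pt \
#       --pytorch_dump_folder_path ./xmod-base-hf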
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 688 |
import string
from math import log10
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> int:
__lowercase = document.translate(
str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' )
__lowercase = document_without_punctuation.split(' ' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> tuple[int, int]:
__lowercase = corpus.lower().translate(
str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with ''
__lowercase = corpus_without_punctuation.split('\n' )
__lowercase = term.lower()
return (len([doc for doc in docs if term in doc] ), len(SCREAMING_SNAKE_CASE ))
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str]=False ) -> float:
if smoothing:
if n == 0:
raise ValueError('log10(0) is undefined.' )
        return round(1 + log10(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('df must be > 0' )
elif n == 0:
raise ValueError('log10(0) is undefined.' )
    return round(log10(n / df ) , 3 )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> float:
return round(tf * idf , 3 )
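# Summary of how the three helpers above compose (documentation only, no new behavior):
#   tf(t, d)  = raw count of term t in document d
#   (df, n)   = (number of documents containing t, total number of documents)
#   idf       = log10(n / df), or 1 + log10(n / (1 + df)) when smoothing is on
#   tf-idf    = round(tf * idf, 3)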
| 688 | 1 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase__ )
class A__ ( lowerCAmelCase__ ):
def __init__( self : str , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : str ) -> str:
"""simple docstring"""
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def a__ ( self : str , _UpperCAmelCase : List[str]=None ) -> Tuple:
"""simple docstring"""
__lowercase = {}
if top_k is not None:
__lowercase = top_k
return {}, {}, postprocess_params
def __call__( self : Tuple , _UpperCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_UpperCAmelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
return super().__call__(_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : int , _UpperCAmelCase : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = load_image(_UpperCAmelCase )
__lowercase = self.image_processor(images=_UpperCAmelCase , return_tensors=self.framework )
return model_inputs
def a__ ( self : Tuple , _UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model(**_UpperCAmelCase )
return model_outputs
def a__ ( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Tuple=5 ) -> Any:
"""simple docstring"""
if top_k > self.model.config.num_labels:
__lowercase = self.model.config.num_labels
if self.framework == "pt":
__lowercase = model_outputs.logits.softmax(-1 )[0]
__lowercase , __lowercase = probs.topk(_UpperCAmelCase )
elif self.framework == "tf":
__lowercase = stable_softmax(model_outputs.logits , axis=-1 )[0]
__lowercase = tf.math.top_k(_UpperCAmelCase , k=_UpperCAmelCase )
__lowercase , __lowercase = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
__lowercase = scores.tolist()
__lowercase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCAmelCase , _UpperCAmelCase )]
| 688 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# TODO: upload to AWS
SCREAMING_SNAKE_CASE__ = {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"""
),
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = "retribert"
def __init__( self : Optional[Any] , _UpperCAmelCase : Dict=3_05_22 , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : List[Any]=8 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Union[str, Any]=30_72 , _UpperCAmelCase : Optional[int]="gelu" , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Tuple=5_12 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : List[Any]=1e-1_2 , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[Any]=1_28 , _UpperCAmelCase : Optional[int]=0 , **_UpperCAmelCase : Union[str, Any] , ) -> Tuple:
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = share_encoders
__lowercase = projection_dim
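# Usage sketch (upstream API, where this class is RetriBertConfig; names in this
# dump are obfuscated):
#   config = RetriBertConfig(projection_dim=256)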
| 688 | 1 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""b0""": efficientnet.EfficientNetBa,
"""b1""": efficientnet.EfficientNetBa,
"""b2""": efficientnet.EfficientNetBa,
"""b3""": efficientnet.EfficientNetBa,
"""b4""": efficientnet.EfficientNetBa,
"""b5""": efficientnet.EfficientNetBa,
"""b6""": efficientnet.EfficientNetBa,
"""b7""": efficientnet.EfficientNetBa,
}
SCREAMING_SNAKE_CASE__ = {
"""b0""": {
"""hidden_dim""": 1280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]:
__lowercase = EfficientNetConfig()
__lowercase = CONFIG_MAP[model_name]['hidden_dim']
__lowercase = CONFIG_MAP[model_name]['width_coef']
__lowercase = CONFIG_MAP[model_name]['depth_coef']
__lowercase = CONFIG_MAP[model_name]['image_size']
__lowercase = CONFIG_MAP[model_name]['dropout_rate']
__lowercase = CONFIG_MAP[model_name]['dw_padding']
__lowercase = 'huggingface/label-files'
__lowercase = 'imagenet-1k-id2label.json'
__lowercase = 1000
__lowercase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
    __lowercase = {int(k ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
return config
def __SCREAMING_SNAKE_CASE ( ) -> List[Any]:
__lowercase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowercase = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return im
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Any:
__lowercase = CONFIG_MAP[model_name]['image_size']
__lowercase = EfficientNetImageProcessor(
size={'height': size, 'width': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 0.4_732_864, 0.47_434_163] , do_center_crop=SCREAMING_SNAKE_CASE , )
return preprocessor
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict:
__lowercase = [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )]
__lowercase = sorted(set(SCREAMING_SNAKE_CASE ) )
__lowercase = len(SCREAMING_SNAKE_CASE )
    __lowercase = {b: str(i ) for b, i in zip(SCREAMING_SNAKE_CASE , range(SCREAMING_SNAKE_CASE ) )}
__lowercase = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
__lowercase = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
__lowercase = {}
for item in rename_keys:
if item[0] in original_param_names:
__lowercase = 'efficientnet.' + item[1]
__lowercase = 'classifier.weight'
__lowercase = 'classifier.bias'
return key_mapping
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any ) -> int:
for key, value in tf_params.items():
if "normalization" in key:
continue
__lowercase = key_mapping[key]
if "_conv" in key and "kernel" in key:
__lowercase = torch.from_numpy(SCREAMING_SNAKE_CASE ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
__lowercase = torch.from_numpy(SCREAMING_SNAKE_CASE ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
__lowercase = torch.from_numpy(np.transpose(SCREAMING_SNAKE_CASE ) )
else:
__lowercase = torch.from_numpy(SCREAMING_SNAKE_CASE )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any ) -> Dict:
__lowercase = model_classes[model_name](
include_top=SCREAMING_SNAKE_CASE , weights='imagenet' , input_tensor=SCREAMING_SNAKE_CASE , input_shape=SCREAMING_SNAKE_CASE , pooling=SCREAMING_SNAKE_CASE , classes=1000 , classifier_activation='softmax' , )
__lowercase = original_model.trainable_variables
__lowercase = original_model.non_trainable_variables
__lowercase = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
__lowercase = param.numpy()
__lowercase = list(tf_params.keys() )
# Load HuggingFace model
__lowercase = get_efficientnet_config(SCREAMING_SNAKE_CASE )
__lowercase = EfficientNetForImageClassification(SCREAMING_SNAKE_CASE ).eval()
__lowercase = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print('Converting parameters...' )
__lowercase = rename_keys(SCREAMING_SNAKE_CASE )
replace_params(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Initialize preprocessor and preprocess input image
__lowercase = convert_image_processor(SCREAMING_SNAKE_CASE )
__lowercase = preprocessor(images=prepare_img() , return_tensors='pt' )
# HF model inference
hf_model.eval()
with torch.no_grad():
__lowercase = hf_model(**SCREAMING_SNAKE_CASE )
__lowercase = outputs.logits.detach().numpy()
# Original model inference
__lowercase = False
__lowercase = CONFIG_MAP[model_name]['image_size']
__lowercase = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
__lowercase = image.img_to_array(SCREAMING_SNAKE_CASE )
__lowercase = np.expand_dims(SCREAMING_SNAKE_CASE , axis=0 )
__lowercase = original_model.predict(SCREAMING_SNAKE_CASE )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 ), "The predicted logits are not the same."
print('Model outputs match!' )
if save_model:
# Create folder to save model
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
os.mkdir(SCREAMING_SNAKE_CASE )
# Save converted model and image processor
hf_model.save_pretrained(SCREAMING_SNAKE_CASE )
preprocessor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
__lowercase = F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(SCREAMING_SNAKE_CASE )
hf_model.push_to_hub(SCREAMING_SNAKE_CASE )
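# Example invocation (script name is illustrative; the flags match the parser below):
#   python convert_efficientnet_to_pytorch.py --model_name b0 --save_model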
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 688 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
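# The _LazyModule indirection keeps `import transformers` cheap: the torch-backed
# Falcon classes above are only imported on first attribute access.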
| 688 | 1 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class A__ ( lowerCAmelCase__ ):
def __init__( self : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any]=13 , _UpperCAmelCase : str=7 , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : Tuple=32 , _UpperCAmelCase : Union[str, Any]=5 , _UpperCAmelCase : List[str]=4 , _UpperCAmelCase : Optional[Any]=64 , _UpperCAmelCase : Any="gelu" , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Dict=5_12 , _UpperCAmelCase : List[str]=16 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : int=3 , _UpperCAmelCase : Dict=4 , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Any=4 , _UpperCAmelCase : Optional[Any]=1 , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
__lowercase = q_groups
__lowercase = k_groups
__lowercase = v_groups
__lowercase = post_attention_groups
__lowercase = intermediate_groups
__lowercase = output_groups
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def a__ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase = SqueezeBertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = SqueezeBertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict ) -> List[str]:
"""simple docstring"""
__lowercase = SqueezeBertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : str ) -> Any:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = SqueezeBertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = SqueezeBertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : List[str] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = SqueezeBertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
((__lowercase) , (__lowercase) , (__lowercase) , (__lowercase) , (__lowercase) , (__lowercase)) = config_and_inputs
__lowercase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
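    # The (config, {"input_ids", "attention_mask"}) pair returned above is what the
    # shared ModelTesterMixin machinery consumes in the common tests below.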
@require_torch
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : Any = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCAmelCase__ : int = (
{
"feature-extraction": SqueezeBertModel,
"fill-mask": SqueezeBertForMaskedLM,
"question-answering": SqueezeBertForQuestionAnswering,
"text-classification": SqueezeBertForSequenceClassification,
"token-classification": SqueezeBertForTokenClassification,
"zero-shot": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : int = False
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Optional[int] = False
def a__ ( self : List[Any] ) -> str:
"""simple docstring"""
__lowercase = SqueezeBertModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , dim=37 )
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_UpperCAmelCase )
def a__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_UpperCAmelCase )
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : Optional[int] ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_UpperCAmelCase )
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_UpperCAmelCase )
@slow
def a__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = SqueezeBertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_torch
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
__lowercase = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] )
__lowercase = model(_UpperCAmelCase )[0]
__lowercase = torch.Size((1, 3) )
self.assertEqual(output.shape , _UpperCAmelCase )
__lowercase = torch.tensor([[0.6_401, -0.0_349, -0.6_041]] )
self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-4 ) )
| 688 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = ["image_processor", "tokenizer"]
lowerCAmelCase__ : Union[str, Any] = "LayoutLMv2ImageProcessor"
lowerCAmelCase__ : Union[str, Any] = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self : Optional[Any] , _UpperCAmelCase : Any=None , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
__lowercase = kwargs.pop('feature_extractor' )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , _UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , _UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : Dict , ) -> BatchEncoding:
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
# first, apply the image processor
__lowercase = self.image_processor(images=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [text] # add batch dimension (as the image processor always adds a batch dimension)
__lowercase = features['words']
__lowercase = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
# add pixel values
__lowercase = features.pop('pixel_values' )
if return_overflowing_tokens is True:
__lowercase = self.get_overflowing_images(_UpperCAmelCase , encoded_inputs['overflow_to_sample_mapping'] )
__lowercase = images
return encoded_inputs
def a__ ( self : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
__lowercase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
f""" {len(_UpperCAmelCase )} and {len(_UpperCAmelCase )}""" )
return images_with_overflow
def a__ ( self : Dict , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : Optional[Any] , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def a__ ( self : str ) -> Dict:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , )
return self.image_processor_class
@property
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , )
return self.image_processor
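# Usage sketch (checkpoint id is an assumption; upstream this class is LayoutXLMProcessor):
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(images=image, return_tensors="pt")  # OCR runs in the image processor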
| 688 | 1 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]:
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Any ) -> Optional[int]:
__lowercase = tmp_path / 'cache'
__lowercase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowercase = JsonDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict ) -> List[str]:
__lowercase = tmp_path / 'cache'
__lowercase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__lowercase = features.copy() if features else default_expected_features
__lowercase = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowercase = JsonDatasetReader(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'features' , [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] , )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : str ) -> Optional[Any]:
__lowercase = tmp_path / 'cache'
__lowercase = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
__lowercase = features.copy() if features else default_expected_features
__lowercase = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowercase = JsonDatasetReader(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__lowercase = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
__lowercase = features.copy()
__lowercase = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowercase = tmp_path / 'cache'
__lowercase = JsonDatasetReader(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
__lowercase = tmp_path / 'cache'
__lowercase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__lowercase = JsonDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , split=SCREAMING_SNAKE_CASE ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str] ) -> str:
if issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowercase = jsonl_path
elif issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowercase = [jsonl_path]
__lowercase = tmp_path / 'cache'
__lowercase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__lowercase = JsonDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int]=("train",) ) -> List[Any]:
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for split in splits:
__lowercase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
__lowercase = tmp_path / 'cache'
__lowercase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowercase = JsonDatasetReader({'train': jsonl_path} , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_json_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] ) -> Dict:
__lowercase = tmp_path / 'cache'
__lowercase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__lowercase = features.copy() if features else default_expected_features
__lowercase = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowercase = JsonDatasetReader({'train': jsonl_path} , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_json_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]:
if split:
__lowercase = {split: jsonl_path}
else:
__lowercase = 'train'
__lowercase = {'train': jsonl_path, 'test': jsonl_path}
__lowercase = tmp_path / 'cache'
__lowercase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__lowercase = JsonDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_json_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]:
return json.load(SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> Dict:
    return [json.loads(line ) for line in buffer]
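# The two loaders above mirror the writer's `lines` flag: lines=False emits a single
# JSON document (json.load), lines=True emits JSON Lines, parsed one object per line.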
class A__ :
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def a__ ( self : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict ) -> Union[str, Any]:
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , lines=_UpperCAmelCase ).write()
buffer.seek(0 )
__lowercase = load_json_function(_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
assert isinstance(exported_content[0] , _UpperCAmelCase )
assert len(_UpperCAmelCase ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] ) -> Dict:
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , lines=_UpperCAmelCase , orient=_UpperCAmelCase ).write()
buffer.seek(0 )
__lowercase = load_json(_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_UpperCAmelCase , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_UpperCAmelCase ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def a__ ( self : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , lines=_UpperCAmelCase , num_proc=2 ).write()
buffer.seek(0 )
__lowercase = load_json_function(_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
assert isinstance(exported_content[0] , _UpperCAmelCase )
assert len(_UpperCAmelCase ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def a__ ( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int ) -> Dict:
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , lines=_UpperCAmelCase , orient=_UpperCAmelCase , num_proc=2 ).write()
buffer.seek(0 )
__lowercase = load_json(_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_UpperCAmelCase , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_UpperCAmelCase ) == 10
def a__ ( self : str , _UpperCAmelCase : List[str] ) -> Any:
"""simple docstring"""
with pytest.raises(_UpperCAmelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def a__ ( self : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = tmp_path_factory.mktemp('data' ) / f"""test.json.{extension}"""
__lowercase = str(shared_datadir / f"""test_file.json.{extension}""" )
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , compression=_UpperCAmelCase ).write()
with fsspec.open(_UpperCAmelCase , 'rb' , compression='infer' ) as f:
__lowercase = f.read()
with fsspec.open(_UpperCAmelCase , 'rb' , compression='infer' ) as f:
__lowercase = f.read()
assert exported_content == original_content
| 688 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
class A__ :
lowerCAmelCase__ : Optional[int] = "dummy_data"
lowerCAmelCase__ : str = "datasets"
lowerCAmelCase__ : Dict = False
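    # A stand-in for the real DownloadManager used when testing dataset scripts:
    # nothing is fetched from the network; URLs are resolved to paths inside the
    # dataset's dummy_data.zip instead.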
def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Union[Version, str] , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[List[Callable]] = None , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 0
__lowercase = dataset_name
__lowercase = cache_dir
__lowercase = use_local_dummy_data
__lowercase = config
# download_callbacks take a single url as input
__lowercase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__lowercase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__lowercase = str(_UpperCAmelCase )
# to be downloaded
__lowercase = None
__lowercase = None
@property
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
if self._dummy_file is None:
__lowercase = self.download_dummy_data()
return self._dummy_file
@property
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__lowercase = cached_path(
_UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=_UpperCAmelCase , force_extract=_UpperCAmelCase )
return os.path.join(_UpperCAmelCase , self.dummy_file_name )
@property
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
if self._bucket_url is None:
__lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : List[str] , *_UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__lowercase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__lowercase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return self.create_dummy_data_dict(_UpperCAmelCase , _UpperCAmelCase )
elif isinstance(_UpperCAmelCase , (list, tuple) ):
return self.create_dummy_data_list(_UpperCAmelCase , _UpperCAmelCase )
else:
return self.create_dummy_data_single(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Optional[int] , _UpperCAmelCase : Tuple , *_UpperCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def a__ ( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , *_UpperCAmelCase : str , **_UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
return path
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return {}
def a__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
for single_url in single_urls:
download_callback(_UpperCAmelCase )
else:
__lowercase = single_urls
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) ) for x in single_urls]
else:
__lowercase = single_urls
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) )
__lowercase = value
# make sure that values are unique
if all(isinstance(_UpperCAmelCase , _UpperCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__lowercase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__lowercase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , _UpperCAmelCase ) ) for url in data_url )
__lowercase = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__lowercase = [data_url[0]] * len(_UpperCAmelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(_UpperCAmelCase )
return dummy_data_list
def a__ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(_UpperCAmelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
            # while now we expect the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
pass
def a__ ( self : int ) -> str:
"""simple docstring"""
pass
def a__ ( self : Optional[int] , _UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
def _iter_archive_members(_UpperCAmelCase : Optional[Any] ):
# this preserves the order of the members inside the ZIP archive
__lowercase = Path(self.dummy_file ).parent
__lowercase = path.relative_to(_UpperCAmelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__lowercase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_UpperCAmelCase )
__lowercase = Path(_UpperCAmelCase )
__lowercase = _iter_archive_members(_UpperCAmelCase ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(_UpperCAmelCase ).as_posix(), file_path.open('rb' )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [paths]
for path in paths:
if os.path.isfile(_UpperCAmelCase ):
if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_UpperCAmelCase ):
if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(_UpperCAmelCase ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(_UpperCAmelCase , _UpperCAmelCase )
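# Illustrative sketch (not part of the original class): how a remote URL is
# mapped to a local dummy-data filename above. The final path component is
# encoded with urllib.parse.quote_plus so query strings stay filesystem-safe.
# The URL below is a made-up example.
import os
import urllib.parse
from pathlib import Path
example_url = 'https://example.com/data/train.txt?version=2'
local_name = urllib.parse.quote_plus(Path(example_url).name)
print(local_name)  # train.txt%3Fversion%3D2
print(os.path.join('dummy', local_name))  # dummy/train.txt%3Fversion%3D2 on POSIX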
| 688 | 1 |
from __future__ import annotations
import math
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not prime
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
SCREAMING_SNAKE_CASE__ = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> list[int]:
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise ValueError('n must be an integer' )
if n <= 0:
        raise ValueError('n must be > 0' )
__lowercase = []
for num in range(len(SCREAMING_SNAKE_CASE ) ):
__lowercase = 0
while 2 * i * i <= odd_composites[num]:
__lowercase = odd_composites[num] - 2 * i * i
if is_prime(SCREAMING_SNAKE_CASE ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(SCREAMING_SNAKE_CASE ) == n:
return list_nums
return []
def __SCREAMING_SNAKE_CASE ( ) -> int:
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'''{solution() = }''')
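# Worked check (illustrative, standalone): 5777 is the smallest odd composite
# that cannot be written as prime + 2*k^2, which is exactly what solution()
# above searches for.
import math
def _is_prime_check(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d for d in range(2, int(math.sqrt(n)) + 1))
assert not _is_prime_check(5777)  # 5777 = 53 * 109, an odd composite
assert all(not _is_prime_check(5777 - 2 * k * k) for k in range(1, 54))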
| 688 |
import math
import sys
import cva
import numpy as np
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : float ) -> np.ndarray:
    # Apply the Gaussian function to each element of the matrix.
__lowercase = math.sqrt(SCREAMING_SNAKE_CASE )
__lowercase = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> np.ndarray:
__lowercase = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : float ) -> np.ndarray:
# Creates a gaussian kernel of given dimension.
__lowercase = np.zeros((kernel_size, kernel_size) )
for i in range(0 , SCREAMING_SNAKE_CASE ):
for j in range(0 , SCREAMING_SNAKE_CASE ):
__lowercase = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int , ) -> np.ndarray:
__lowercase = np.zeros(img.shape )
__lowercase = get_gauss_kernel(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase , __lowercase = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
__lowercase = get_slice(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = img_s - img_s[kernel_size // 2, kernel_size // 2]
__lowercase = vec_gaussian(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = np.multiply(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = np.multiply(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = np.sum(SCREAMING_SNAKE_CASE ) / np.sum(SCREAMING_SNAKE_CASE )
__lowercase = val
return imga
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : list ) -> tuple:
__lowercase = args[1] if args[1:] else '../image_data/lena.jpg'
__lowercase = float(args[2] ) if args[2:] else 1.0
__lowercase = float(args[3] ) if args[3:] else 1.0
if args[4:]:
__lowercase = int(args[4] )
__lowercase = kernel_size + abs(kernel_size % 2 - 1 )
else:
__lowercase = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = parse_args(sys.argv)
SCREAMING_SNAKE_CASE__ = cva.imread(filename, 0)
cva.imshow("""input image""", img)
SCREAMING_SNAKE_CASE__ = img / 255
SCREAMING_SNAKE_CASE__ = out.astype("""float32""")
SCREAMING_SNAKE_CASE__ = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
SCREAMING_SNAKE_CASE__ = out * 255
SCREAMING_SNAKE_CASE__ = np.uinta(out)
cva.imshow("""output image""", out)
cva.waitKey(0)
cva.destroyAllWindows()
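# Minimal standalone sketch (an assumption: same math as the functions above,
# reimplemented here so it runs without OpenCV or an image file). The filter
# weights each window pixel by both spatial distance and intensity difference,
# so noise is smoothed while the step edge stays sharp.
import math
import numpy as np
def _gauss(x, variance):
    sigma = math.sqrt(variance)
    return (1 / (sigma * math.sqrt(2 * math.pi))) * np.exp(-((x / sigma) ** 2) * 0.5)
rng = np.random.default_rng(0)
demo = rng.normal(0, 0.05, (9, 9))
demo[:, 5:] += 1.0  # step edge
half = 2  # 5x5 window
yy, xx = np.mgrid[-half : half + 1, -half : half + 1]
space = _gauss(np.sqrt(yy**2 + xx**2), 1.0)
filtered = demo.copy()
for i in range(half, 9 - half):
    for j in range(half, 9 - half):
        patch = demo[i - half : i + half + 1, j - half : j + half + 1]
        w = space * _gauss(patch - demo[i, j], 1.0)
        filtered[i, j] = (w * patch).sum() / w.sum()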
| 688 | 1 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class A__ ( lowerCAmelCase__ ):
def __init__( self : str , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : List[str] ) -> None:
"""simple docstring"""
warnings.warn(
'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PoolFormerImageProcessor instead.' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 688 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : Any=18 , _UpperCAmelCase : Dict=30 , _UpperCAmelCase : Tuple=4_00 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Any=True , ) -> Dict:
"""simple docstring"""
__lowercase = size if size is not None else {'height': 18, 'width': 18}
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = image_size
__lowercase = min_resolution
__lowercase = max_resolution
__lowercase = do_resize
__lowercase = size
__lowercase = apply_ocr
def a__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : int = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = LayoutLMvaImageProcessingTester(self )
@property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'apply_ocr' ) )
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
pass
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , _UpperCAmelCase )
self.assertIsInstance(encoding.boxes , _UpperCAmelCase )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = LayoutLMvaImageProcessor()
from datasets import load_dataset
__lowercase = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__lowercase = Image.open(ds[0]['file'] ).convert('RGB' )
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__lowercase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__lowercase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _UpperCAmelCase )
self.assertListEqual(encoding.boxes , _UpperCAmelCase )
        # with apply_ocr = False
__lowercase = LayoutLMvaImageProcessor(apply_ocr=_UpperCAmelCase )
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
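# Usage sketch (requires a recent transformers and Pillow; with apply_ocr=True
# it would also need the Tesseract binary, so OCR is disabled here). The
# processor is built directly, as in the tests above, so no checkpoint
# download is needed.
import numpy as np
from PIL import Image
from transformers import LayoutLMv3ImageProcessor
_image = Image.fromarray(np.full((100, 100, 3), 255, dtype=np.uint8))
_processor = LayoutLMv3ImageProcessor(apply_ocr=False)
_encoding = _processor(_image, return_tensors='pt')
print(_encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224])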
| 688 | 1 |
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : bool = False ) -> list[float]:
if radian_mode:
return [magnitude * cos(SCREAMING_SNAKE_CASE ), magnitude * sin(SCREAMING_SNAKE_CASE )]
return [magnitude * cos(radians(SCREAMING_SNAKE_CASE ) ), magnitude * sin(radians(SCREAMING_SNAKE_CASE ) )]
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : NDArray[floataa] , SCREAMING_SNAKE_CASE : NDArray[floataa] , SCREAMING_SNAKE_CASE : float = 10**-1 ) -> bool:
__lowercase = cross(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = sum(SCREAMING_SNAKE_CASE )
return abs(SCREAMING_SNAKE_CASE ) < eps
if __name__ == "__main__":
# Test to check if it works
SCREAMING_SNAKE_CASE__ = array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
SCREAMING_SNAKE_CASE__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
SCREAMING_SNAKE_CASE__ = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
SCREAMING_SNAKE_CASE__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
SCREAMING_SNAKE_CASE__ = array([[0, -2000], [0, -1200], [0, 1_5600], [0, -1_2400]])
SCREAMING_SNAKE_CASE__ = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
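# Worked example (illustrative): decomposing one polar force by hand and
# checking that a force acting through the origin contributes no moment,
# mirroring the cross-product test used by in_static_equilibrium above.
from numpy import array, cos, cross, radians, sin
magnitude, angle = 100.0, 30.0
fx, fy = magnitude * cos(radians(angle)), magnitude * sin(radians(angle))
print(round(fx, 2), round(fy, 2))  # 86.6 50.0
moment = cross(array([0.0, 0.0]), array([fx, fy]))  # r x F with r at the origin
assert abs(moment) < 1e-9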
| 688 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = "umt5"
lowerCAmelCase__ : Tuple = ["past_key_values"]
def __init__( self : str , _UpperCAmelCase : int=25_01_12 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : List[str]=64 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : str=8 , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : str=32 , _UpperCAmelCase : Optional[int]=1_28 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : str=1e-6 , _UpperCAmelCase : Dict=1.0 , _UpperCAmelCase : str="gated-gelu" , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Tuple="T5Tokenizer" , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[str]=0 , _UpperCAmelCase : int=1 , _UpperCAmelCase : List[str]=0 , **_UpperCAmelCase : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
is_encoder_decoder=_UpperCAmelCase , tokenizer_class=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = vocab_size
__lowercase = d_model
__lowercase = d_kv
__lowercase = d_ff
__lowercase = num_layers
__lowercase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__lowercase = num_heads
__lowercase = relative_attention_num_buckets
__lowercase = relative_attention_max_distance
__lowercase = dropout_rate
__lowercase = layer_norm_epsilon
__lowercase = initializer_factor
__lowercase = feed_forward_proj
__lowercase = use_cache
__lowercase = self.feed_forward_proj.split('-' )
__lowercase = act_info[-1]
__lowercase = act_info[0] == 'gated'
if len(_UpperCAmelCase ) > 1 and act_info[0] != "gated" or len(_UpperCAmelCase ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
__lowercase = 'gelu_new'
@property
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
return self.d_model
@property
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return self.num_heads
@property
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return self.num_layers
class A__ ( lowerCAmelCase__ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def a__ ( self : str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__lowercase = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
__lowercase = 'past_encoder_sequence + sequence'
__lowercase = {0: 'batch'}
__lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase = {0: 'batch', 1: 'decoder_sequence'}
__lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
return 13
@property
def a__ ( self : Dict ) -> float:
"""simple docstring"""
return 5e-4
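# Illustrative sketch (standalone): the feed_forward_proj parsing in the
# config above splits on '-' to recover the activation name and a 'gated'
# flag, with 'gated-gelu' special-cased to the 'gelu_new' activation.
feed_forward_proj = 'gated-gelu'
act_info = feed_forward_proj.split('-')
dense_act_fn = act_info[-1]             # 'gelu'
is_gated_act = act_info[0] == 'gated'   # True
if feed_forward_proj == 'gated-gelu':
    dense_act_fn = 'gelu_new'
print(dense_act_fn, is_gated_act)       # gelu_new True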
| 688 | 1 |
import pprint
import requests
SCREAMING_SNAKE_CASE__ = """https://zenquotes.io/api"""
def __SCREAMING_SNAKE_CASE ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def __SCREAMING_SNAKE_CASE ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = random_quotes()
pprint.pprint(response)
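# Hardened variant (an assumption, not part of the original wrapper): add a
# timeout and an HTTP status check before decoding the JSON payload.
import requests
def quote_of_the_day_safe(timeout: float = 10.0) -> list:
    response = requests.get('https://zenquotes.io/api/today', timeout=timeout)
    response.raise_for_status()
    return response.json()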
| 688 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[Any] = "layoutlmv3"
def __init__( self : Optional[Any] , _UpperCAmelCase : int=5_02_65 , _UpperCAmelCase : Union[str, Any]=7_68 , _UpperCAmelCase : str=12 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : List[str]=30_72 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : Optional[Any]=0 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : Optional[Any]=1_28 , _UpperCAmelCase : Tuple=1_28 , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : Dict=1_28 , _UpperCAmelCase : int=64 , _UpperCAmelCase : List[str]=2_56 , _UpperCAmelCase : int=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=2_24 , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
vocab_size=_UpperCAmelCase , hidden_size=_UpperCAmelCase , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , intermediate_size=_UpperCAmelCase , hidden_act=_UpperCAmelCase , hidden_dropout_prob=_UpperCAmelCase , attention_probs_dropout_prob=_UpperCAmelCase , max_position_embeddings=_UpperCAmelCase , type_vocab_size=_UpperCAmelCase , initializer_range=_UpperCAmelCase , layer_norm_eps=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = max_ad_position_embeddings
__lowercase = coordinate_size
__lowercase = shape_size
__lowercase = has_relative_attention_bias
__lowercase = rel_pos_bins
__lowercase = max_rel_pos
__lowercase = has_spatial_attention_bias
__lowercase = rel_ad_pos_bins
__lowercase = max_rel_ad_pos
__lowercase = text_embed
__lowercase = visual_embed
__lowercase = input_size
__lowercase = num_channels
__lowercase = patch_size
__lowercase = classifier_dropout
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[str] = version.parse("1.12" )
@property
def a__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def a__ ( self : Any ) -> float:
"""simple docstring"""
return 1e-5
@property
def a__ ( self : Dict ) -> int:
"""simple docstring"""
return 12
def a__ ( self : Tuple , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional["TensorType"] = None , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , 'apply_ocr' , _UpperCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase )
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__lowercase = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = dict(
processor(
_UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) )
return inputs
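# Sketch of the dynamic-axis fallback used in the dummy-input generation above
# (reimplemented here from its documented behaviour; the exact upstream
# signature should be treated as an assumption): a dimension of -1 means
# 'dynamic', so a fixed default is substituted, then special tokens are
# subtracted from the sequence length.
def _effective_axis(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    if dimension <= 0:
        dimension = fixed_dimension
    return dimension - num_token_to_add
print(_effective_axis(-1, 2))      # batch axis: falls back to 2
print(_effective_axis(-1, 8, 2))   # sequence axis: 8 minus 2 special tokens = 6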
| 688 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 688 |
from pathlib import Path
import numpy as np
from PIL import Image
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray ) -> np.ndarray:
__lowercase , __lowercase , __lowercase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2_989 * r + 0.5_870 * g + 0.1_140 * b
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray ) -> np.ndarray:
return (gray > 127) & (gray <= 255)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : np.ndarray ) -> np.ndarray:
__lowercase = np.zeros_like(SCREAMING_SNAKE_CASE )
__lowercase = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
__lowercase = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
__lowercase = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
__lowercase = int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
SCREAMING_SNAKE_CASE__ = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
SCREAMING_SNAKE_CASE__ = np.array(Image.open(lena_path))
# kernel to be applied
SCREAMING_SNAKE_CASE__ = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
SCREAMING_SNAKE_CASE__ = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
SCREAMING_SNAKE_CASE__ = Image.fromarray(output).convert("""RGB""")
pil_img.save("""result_dilation.png""")
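# Tiny worked example (illustrative, self-contained so it does not depend on
# the renamed helpers above): dilating a single white pixel with the
# cross-shaped structuring element grows it into a plus sign.
import numpy as np
def _dilate(image, kernel):
    kh, kw = kernel.shape
    padded = np.zeros((image.shape[0] + kh - 1, image.shape[1] + kw - 1))
    padded[kh // 2 : kh // 2 + image.shape[0], kw // 2 : kw // 2 + image.shape[1]] = image
    out = np.zeros_like(image)
    for y in range(image.shape[0]):
        for x in range(image.shape[1]):
            out[y, x] = int((kernel * padded[y : y + kh, x : x + kw]).sum() > 0)
    return out
single_pixel = np.zeros((5, 5), dtype=int)
single_pixel[2, 2] = 1
cross_kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
dilated = _dilate(single_pixel, cross_kernel)
assert dilated[1:4, 2].all() and dilated[2, 1:4].all() and dilated[0, 0] == 0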
| 688 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Union[str, Any] = ["image_processor", "tokenizer"]
lowerCAmelCase__ : str = "CLIPImageProcessor"
lowerCAmelCase__ : Tuple = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Any , _UpperCAmelCase : int=None , _UpperCAmelCase : str=None , **_UpperCAmelCase : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
__lowercase = kwargs.pop('feature_extractor' )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self : Any , _UpperCAmelCase : str=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str=None , **_UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
__lowercase = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if images is not None:
__lowercase = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if text is not None and images is not None:
__lowercase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def a__ ( self : Optional[Any] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : str ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : List[str] , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer.model_input_names
__lowercase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , )
return self.image_processor_class
@property
def a__ ( self : int ) -> Any:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , )
return self.image_processor
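# Usage sketch (assumes the transformers and Pillow packages plus network
# access for the standard public CLIP checkpoint, which is not defined in
# this file).
import numpy as np
from PIL import Image
from transformers import CLIPProcessor
clip_processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
blank = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
batch = clip_processor(text=['a photo of a cat'], images=blank, return_tensors='pt')
print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']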
| 688 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[str] = ["pixel_values"]
def __init__( self : Tuple , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 2_55 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : bool = True , **_UpperCAmelCase : str , ) -> None:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
__lowercase = size if size is not None else {'height': 3_84, 'width': 3_84}
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__lowercase = do_resize
__lowercase = size
__lowercase = resample
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_normalize
__lowercase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__lowercase = image_std if image_std is not None else OPENAI_CLIP_STD
__lowercase = do_convert_rgb
def a__ ( self : int , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : int , ) -> np.ndarray:
"""simple docstring"""
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
__lowercase = (size['height'], size['width'])
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : Optional[int] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Any , ) -> str:
"""simple docstring"""
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[str] , ) -> np.ndarray:
"""simple docstring"""
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : int , _UpperCAmelCase : ImageInput , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Dict[str, int]] = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[float] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : int , ) -> PIL.Image.Image:
"""simple docstring"""
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = resample if resample is not None else self.resample
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = image_mean if image_mean is not None else self.image_mean
__lowercase = image_std if image_std is not None else self.image_std
__lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase = size if size is not None else self.size
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__lowercase = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase = [convert_to_rgb(_UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
__lowercase = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_rescale:
__lowercase = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
__lowercase = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
__lowercase = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
__lowercase = BatchFeature(data={'pixel_values': images} , tensor_type=_UpperCAmelCase )
return encoded_outputs
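# Equivalent numpy pipeline (a sketch of what a single-image preprocess call
# above amounts to, written standalone; the mean/std constants mirror
# OPENAI_CLIP_MEAN and OPENAI_CLIP_STD).
import numpy as np
from PIL import Image
clip_mean = np.array([0.48145466, 0.4578275, 0.40821073])
clip_std = np.array([0.26862954, 0.26130258, 0.27577711])
rgb = np.asarray(Image.new('RGB', (500, 300)).resize((384, 384), Image.BICUBIC), dtype=np.float32)
pixels = (rgb / 255.0 - clip_mean) / clip_std  # rescale, then normalize
pixels = pixels.transpose(2, 0, 1)             # HWC -> CHW (ChannelDimension.FIRST)
print(pixels.shape)                            # (3, 384, 384)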
| 688 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> List[Any]:
__lowercase = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
__lowercase = True if 'large' in model_name or 'huge' in model_name else False
__lowercase = True if 'large' in model_name or 'huge' in model_name else False
__lowercase = True if 'large' in model_name or 'huge' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__lowercase = [3, 3, 3, 3]
__lowercase = [5, 5, 5, 5]
elif "fl4" in model_name:
__lowercase = [4, 4, 4, 4]
__lowercase = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__lowercase = [3, 3, 3, 3]
if "lrf" in model_name:
__lowercase = [3, 3, 3, 3]
else:
__lowercase = [2, 2, 2, 2]
if "tiny" in model_name:
__lowercase = 96
elif "small" in model_name:
__lowercase = 96
elif "base" in model_name:
__lowercase = 128
elif "large" in model_name:
__lowercase = 192
elif "xlarge" in model_name:
__lowercase = 256
elif "huge" in model_name:
__lowercase = 352
# set label information
__lowercase = 'huggingface/label-files'
if "large" in model_name or "huge" in model_name:
__lowercase = 'imagenet-22k-id2label.json'
else:
__lowercase = 'imagenet-1k-id2label.json'
__lowercase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
__lowercase = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__lowercase = {v: k for k, v in idalabel.items()}
__lowercase = FocalNetConfig(
embed_dim=SCREAMING_SNAKE_CASE , depths=SCREAMING_SNAKE_CASE , focal_levels=SCREAMING_SNAKE_CASE , focal_windows=SCREAMING_SNAKE_CASE , use_conv_embed=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , labelaid=SCREAMING_SNAKE_CASE , use_post_layernorm=SCREAMING_SNAKE_CASE , use_layerscale=SCREAMING_SNAKE_CASE , )
return config
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]:
if "patch_embed.proj" in name:
__lowercase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__lowercase = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
__lowercase = 'encoder.' + name
if "encoder.layers" in name:
__lowercase = name.replace('encoder.layers' , 'encoder.stages' )
if "downsample.proj" in name:
__lowercase = name.replace('downsample.proj' , 'downsample.projection' )
if "blocks" in name:
__lowercase = name.replace('blocks' , 'layers' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__lowercase = name.replace('modulation.f' , 'modulation.projection_in' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__lowercase = name.replace('modulation.h' , 'modulation.projection_context' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__lowercase = name.replace('modulation.proj' , 'modulation.projection_out' )
if name == "norm.weight":
__lowercase = 'layernorm.weight'
if name == "norm.bias":
__lowercase = 'layernorm.bias'
if "head" in name:
__lowercase = name.replace('head' , 'classifier' )
else:
__lowercase = 'focalnet.' + name
return name
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[int]=False ) -> Union[str, Any]:
# fmt: off
__lowercase = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
__lowercase = model_name_to_url[model_name]
print('Checkpoint URL: ' , SCREAMING_SNAKE_CASE )
__lowercase = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location='cpu' )['model']
# rename keys
for key in state_dict.copy().keys():
__lowercase = state_dict.pop(SCREAMING_SNAKE_CASE )
__lowercase = val
__lowercase = get_focalnet_config(SCREAMING_SNAKE_CASE )
__lowercase = FocalNetForImageClassification(SCREAMING_SNAKE_CASE )
model.eval()
# load state dict
model.load_state_dict(SCREAMING_SNAKE_CASE )
# verify conversion
__lowercase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowercase = BitImageProcessor(
do_resize=SCREAMING_SNAKE_CASE , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE , crop_size=224 , do_normalize=SCREAMING_SNAKE_CASE , image_mean=SCREAMING_SNAKE_CASE , image_std=SCREAMING_SNAKE_CASE , )
__lowercase = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
__lowercase = processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt' )
__lowercase = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
__lowercase = image_transforms(SCREAMING_SNAKE_CASE ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , SCREAMING_SNAKE_CASE , atol=1E-4 )
__lowercase = model(**SCREAMING_SNAKE_CASE )
__lowercase = outputs.logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
print('First values of logits:' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__lowercase = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
__lowercase = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
__lowercase = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
__lowercase = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
__lowercase = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
__lowercase = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
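# Illustrative trace (standalone string surgery) of the key-renaming chain
# above, applied to one made-up checkpoint key:
renamed = 'layers.0.blocks.1.modulation.f.weight'
renamed = 'encoder.' + renamed
renamed = renamed.replace('encoder.layers', 'encoder.stages')
renamed = renamed.replace('blocks', 'layers')
renamed = renamed.replace('modulation.f', 'modulation.projection_in')
renamed = 'focalnet.' + renamed
print(renamed)  # focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight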
| 688 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 688 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = "bridgetower_vision_model"
def __init__( self : int , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : int=3 , _UpperCAmelCase : List[str]=16 , _UpperCAmelCase : Tuple=2_88 , _UpperCAmelCase : Optional[int]=1 , _UpperCAmelCase : Optional[Any]=1e-0_5 , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : int=False , **_UpperCAmelCase : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_channels
__lowercase = patch_size
__lowercase = image_size
__lowercase = initializer_factor
__lowercase = layer_norm_eps
__lowercase = stop_gradient
__lowercase = share_layernorm
__lowercase = remove_last_layer
@classmethod
def a__ ( cls : int , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : List[Any] ) -> "PretrainedConfig":
"""simple docstring"""
__lowercase , __lowercase = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
if config_dict.get('model_type' ) == "bridgetower":
            __lowercase = config_dict['vision_config']  # the vision sub-config, not the text one
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Any = "bridgetower_text_model"
def __init__( self : str , _UpperCAmelCase : Tuple=5_02_65 , _UpperCAmelCase : Dict=7_68 , _UpperCAmelCase : List[Any]=12 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : Optional[Any]=1 , _UpperCAmelCase : Optional[Any]=30_72 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : List[Any]=5_14 , _UpperCAmelCase : str=1 , _UpperCAmelCase : Union[str, Any]=1e-0_5 , _UpperCAmelCase : int=1 , _UpperCAmelCase : int=0 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Optional[int]="absolute" , _UpperCAmelCase : str=True , **_UpperCAmelCase : Union[str, Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = initializer_factor
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = pad_token_id
__lowercase = bos_token_id
__lowercase = eos_token_id
@classmethod
def a__ ( cls : Union[str, Any] , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Tuple ) -> "PretrainedConfig":
"""simple docstring"""
__lowercase , __lowercase = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
if config_dict.get('model_type' ) == "bridgetower":
__lowercase = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[Any] = "bridgetower"
def __init__( self : Union[str, Any] , _UpperCAmelCase : str=True , _UpperCAmelCase : str="gelu" , _UpperCAmelCase : List[str]=7_68 , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : List[Any]=1e-0_5 , _UpperCAmelCase : List[str]=False , _UpperCAmelCase : Tuple="add" , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : int=6 , _UpperCAmelCase : Any=False , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : str=None , **_UpperCAmelCase : Tuple , ) -> str:
"""simple docstring"""
__lowercase = kwargs.pop('text_config_dict' , _UpperCAmelCase )
__lowercase = kwargs.pop('vision_config_dict' , _UpperCAmelCase )
super().__init__(**_UpperCAmelCase )
__lowercase = share_cross_modal_transformer_layers
__lowercase = hidden_act
__lowercase = hidden_size
__lowercase = initializer_factor
__lowercase = layer_norm_eps
__lowercase = share_link_tower_layers
__lowercase = link_tower_type
__lowercase = num_attention_heads
__lowercase = num_hidden_layers
__lowercase = tie_word_embeddings
__lowercase = init_layernorm_from_vision_encoder
if text_config is None:
__lowercase = {}
logger.info('`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.' )
if vision_config is None:
__lowercase = {}
logger.info('`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.' )
__lowercase = BridgeTowerTextConfig(**_UpperCAmelCase )
__lowercase = BridgeTowerVisionConfig(**_UpperCAmelCase )
@classmethod
def a__ ( cls : Optional[Any] , _UpperCAmelCase : BridgeTowerTextConfig , _UpperCAmelCase : BridgeTowerVisionConfig , **_UpperCAmelCase : Any ) -> Optional[Any]:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_UpperCAmelCase )
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.text_config.to_dict()
__lowercase = self.vision_config.to_dict()
__lowercase = self.__class__.model_type
return output
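# Hedged usage sketch (not part of the original file): composing the combined
# config from its two sub-configs via the classmethod above. The public names
# BridgeTowerTextConfig / BridgeTowerVisionConfig / BridgeTowerConfig and the
# method name from_text_vision_configs are assumptions inferred from the
# references in the method bodies, since the classes here are obfuscated to A__:
#   text_cfg = BridgeTowerTextConfig()
#   vision_cfg = BridgeTowerVisionConfig()
#   cfg = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)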
| 688 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict ) -> Any:
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
__lowercase = TapasConfig.from_json_file(SCREAMING_SNAKE_CASE )
# set absolute/relative position embeddings parameter
__lowercase = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "WTQ":
# run_task_main.py hparams
__lowercase = 4
__lowercase = True
# hparam_utils.py hparams
__lowercase = 0.664_694
__lowercase = 0.207_951
__lowercase = 0.121_194
__lowercase = True
__lowercase = True
__lowercase = False
__lowercase = 0.0_352_513
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
__lowercase = 4
__lowercase = False
# hparam_utils.py hparams
__lowercase = 36.4_519
__lowercase = 0.903_421
__lowercase = 222.088
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = 0.763_141
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "TABFACT":
__lowercase = TapasForSequenceClassification(config=SCREAMING_SNAKE_CASE )
elif task == "MLM":
__lowercase = TapasForMaskedLM(config=SCREAMING_SNAKE_CASE )
elif task == "INTERMEDIATE_PRETRAINING":
__lowercase = TapasModel(config=SCREAMING_SNAKE_CASE )
else:
raise ValueError(F"""Task {task} not supported.""" )
print(F"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model (weights and configuration)
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
# Save tokenizer files
print(F"""Save tokenizer files to {pytorch_dump_path}""" )
__lowercase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt' , model_max_length=512 )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
print('Used relative position embeddings:' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
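# Example invocation (illustrative; the script name and all paths are
# placeholders, not taken from the original repository):
#   python convert_tapas_checkpoint.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path ./tapas_wtq/model.ckpt \
#       --tapas_config_file ./tapas_wtq/bert_config.json \
#       --pytorch_dump_path ./tapas_wtq_pytorch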
| 688 | 1 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=13 , _UpperCAmelCase : Any=7 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : List[Any]=36 , _UpperCAmelCase : Optional[Any]=6 , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : Any=6 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Dict=5_12 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Any=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = embedding_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_hidden_groups
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = AlbertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ) -> Tuple:
"""simple docstring"""
__lowercase = AlbertForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , sentence_order_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def a__ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AlbertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = AlbertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = AlbertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
        (
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
        ) = config_and_inputs
__lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : int = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : Dict = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : Optional[Any] = True
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : int=False ) -> Tuple:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase )
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = AlbertModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
@slow
def a__ ( self : int ) -> Any:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AlbertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = AlbertModel.from_pretrained('albert-base-v2' )
__lowercase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
__lowercase = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , _UpperCAmelCase )
__lowercase = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) )
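# Run note (illustrative; the file path is an assumption based on the usual
# transformers test layout, not stated in this file):
#   pytest tests/models/albert/test_modeling_albert.py -k "Albert"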
| 688 |
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> int:
return int((input_a, input_a).count(1 ) != 0 )
def __SCREAMING_SNAKE_CASE ( ) -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 688 | 1 |
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any ) -> List[Any]:
__lowercase = len(SCREAMING_SNAKE_CASE )
while cur > 1:
# Find the maximum number in arr
__lowercase = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
__lowercase = arr[mi::-1] + arr[mi + 1 : len(SCREAMING_SNAKE_CASE )]
# Reverse whole list
__lowercase = arr[cur - 1 :: -1] + arr[cur : len(SCREAMING_SNAKE_CASE )]
cur -= 1
return arr
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = input("""Enter numbers separated by a comma:\n""").strip()
SCREAMING_SNAKE_CASE__ = [int(item) for item in user_input.split(""",""")]
print(pancake_sort(unsorted))
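# Worked example (illustrative, traced against the implementation above):
#   pancake_sort([3, 1, 2])
#   cur=3: max of arr[:3] at index 0 -> flip prefix of length 1 (no-op),
#          then flip the first 3 elements -> [2, 1, 3]
#   cur=2: max of arr[:2] at index 0 -> flip prefix of length 1 (no-op),
#          then flip the first 2 elements -> [1, 2, 3]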
| 688 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = """Hello, World!"""
SCREAMING_SNAKE_CASE__ = """en_XX"""
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : bool ) -> Optional[int]:
__lowercase = Path('data_bin' )
__lowercase = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(SCREAMING_SNAKE_CASE ).parent ) , checkpoint_file=Path(SCREAMING_SNAKE_CASE ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(SCREAMING_SNAKE_CASE ) , bpe='sentencepiece' , sentencepiece_model=str(Path(SCREAMING_SNAKE_CASE ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
xmod.eval() # disable dropout
print(SCREAMING_SNAKE_CASE )
__lowercase = xmod.model.encoder.sentence_encoder
__lowercase = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our X-MOD config:' , SCREAMING_SNAKE_CASE )
__lowercase = XmodForSequenceClassification(SCREAMING_SNAKE_CASE ) if classification_head else XmodForMaskedLM(SCREAMING_SNAKE_CASE )
model.eval()
# Now let's copy all the weights.
# Embeddings
__lowercase = xmod_sent_encoder.embed_tokens.weight
__lowercase = xmod_sent_encoder.embed_positions.weight
__lowercase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__lowercase = xmod_sent_encoder.layernorm_embedding.weight
__lowercase = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowercase = model.roberta.encoder.layer[i]
__lowercase = xmod_sent_encoder.layers[i]
# self attention
__lowercase = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
__lowercase = xmod_layer.self_attn.q_proj.weight
__lowercase = xmod_layer.self_attn.q_proj.bias
__lowercase = xmod_layer.self_attn.k_proj.weight
__lowercase = xmod_layer.self_attn.k_proj.bias
__lowercase = xmod_layer.self_attn.v_proj.weight
__lowercase = xmod_layer.self_attn.v_proj.bias
# self-attention output
__lowercase = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
__lowercase = xmod_layer.self_attn.out_proj.weight
__lowercase = xmod_layer.self_attn.out_proj.bias
__lowercase = xmod_layer.self_attn_layer_norm.weight
__lowercase = xmod_layer.self_attn_layer_norm.bias
# intermediate
__lowercase = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.' )
__lowercase = xmod_layer.fca.weight
__lowercase = xmod_layer.fca.bias
# output
__lowercase = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.' )
__lowercase = xmod_layer.fca.weight
__lowercase = xmod_layer.fca.bias
__lowercase = xmod_layer.final_layer_norm.weight
__lowercase = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__lowercase = xmod_layer.adapter_layer_norm.weight
__lowercase = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__lowercase = bert_output.adapter_modules[lang_code]
__lowercase = xmod_layer.adapter_modules[lang_code]
__lowercase = from_adapter.fca.weight
__lowercase = from_adapter.fca.bias
__lowercase = from_adapter.fca.weight
__lowercase = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__lowercase = xmod_sent_encoder.layer_norm.weight
__lowercase = xmod_sent_encoder.layer_norm.bias
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'].dense.weight
__lowercase = xmod.model.classification_heads['mnli'].dense.bias
__lowercase = xmod.model.classification_heads['mnli'].out_proj.weight
__lowercase = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
__lowercase = xmod.model.encoder.lm_head.dense.weight
__lowercase = xmod.model.encoder.lm_head.dense.bias
__lowercase = xmod.model.encoder.lm_head.layer_norm.weight
__lowercase = xmod.model.encoder.lm_head.layer_norm.bias
__lowercase = xmod.model.encoder.lm_head.weight
__lowercase = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__lowercase = xmod.encode(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(SCREAMING_SNAKE_CASE )
__lowercase = model(SCREAMING_SNAKE_CASE )[0]
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'](xmod.extract_features(SCREAMING_SNAKE_CASE ) )
else:
__lowercase = xmod.model(SCREAMING_SNAKE_CASE , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
__lowercase = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
__lowercase = torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
Path(SCREAMING_SNAKE_CASE ).mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
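# Example invocation (illustrative; the script name and paths are placeholders):
#   python convert_xmod_checkpoint.py \
#       --xmod_checkpoint_path ./xmod.base/model.pt \
#       --pytorch_dump_folder_path ./xmod-base-pytorch
# Pass --classification_head as well when the fairseq checkpoint carries an
# 'mnli' classification head (see the branch above).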
| 688 | 1 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]:
# Construct model
if gpta_config_file == "":
__lowercase = GPTaConfig()
else:
__lowercase = GPTaConfig.from_json_file(SCREAMING_SNAKE_CASE )
__lowercase = GPTaModel(SCREAMING_SNAKE_CASE )
# Load weights from numpy
load_tf_weights_in_gpta(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model
__lowercase = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
__lowercase = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
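# Example invocation (illustrative; the script name and paths are placeholders).
# Note that argparse stores the values under the literal flag names declared
# above (gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path ./gpt2/model.ckpt \
#       --pytorch_dump_folder_path ./gpt2-pytorch
# --gpt2_config_file may be omitted, in which case the default config is used.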
| 688 |
from __future__ import annotations
import math
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int ) -> float:
__lowercase = u
for i in range(1 , SCREAMING_SNAKE_CASE ):
__lowercase = temp * (u - i)
return temp
def __SCREAMING_SNAKE_CASE ( ) -> None:
__lowercase = int(input('enter the numbers of values: ' ) )
__lowercase = []
for _ in range(SCREAMING_SNAKE_CASE ):
y.append([] )
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE ):
y[i].append(SCREAMING_SNAKE_CASE )
__lowercase = 0
print('enter the values of parameters in a list: ' )
__lowercase = list(map(SCREAMING_SNAKE_CASE , input().split() ) )
print('enter the values of corresponding parameters: ' )
for i in range(SCREAMING_SNAKE_CASE ):
__lowercase = float(input() )
__lowercase = int(input('enter the value to interpolate: ' ) )
__lowercase = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , SCREAMING_SNAKE_CASE ):
for j in range(n - i ):
__lowercase = y[j + 1][i - 1] - y[j][i - 1]
__lowercase = y[0][0]
for i in range(1 , SCREAMING_SNAKE_CASE ):
summ += (ucal(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) * y[0][i]) / math.factorial(SCREAMING_SNAKE_CASE )
print(F"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
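# Worked example (illustrative, not part of the interactive script above): for
# samples x = [0, 1, 2, 3], y = [1, 2, 4, 8] and value = 1.5, we get
# u = (1.5 - 0) / (1 - 0) = 1.5, forward differences dy0 = 1, d2y0 = 1,
# d3y0 = 1, and the Newton forward sum
#   1 + 1.5*1 + (1.5*0.5/2!)*1 + (1.5*0.5*(-0.5)/3!)*1 = 2.8125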
| 688 | 1 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
    # Remove any previous config/weights in the output directory, then save the model
if os.path.exists(SCREAMING_SNAKE_CASE ):
if os.path.exists(os.path.join(SCREAMING_SNAKE_CASE , 'config.json' ) ) and os.path.isfile(
os.path.join(SCREAMING_SNAKE_CASE , 'config.json' ) ):
os.remove(os.path.join(SCREAMING_SNAKE_CASE , 'config.json' ) )
if os.path.exists(os.path.join(SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ) and os.path.isfile(
os.path.join(SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ):
os.remove(os.path.join(SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) )
else:
os.makedirs(SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str]=False ) -> str:
__lowercase = 2
if unlogit:
__lowercase = torch.pow(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = p * torch.log(SCREAMING_SNAKE_CASE )
__lowercase = 0
return -plogp.sum(dim=-1 )
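# Illustrative sanity check (assumed values, not from the original file): for a
# uniform attention row p = [0.25, 0.25, 0.25, 0.25], the default (unlogit=False)
# path returns -sum(p * log(p)) = log(4) ≈ 1.386, while a one-hot row gives 0.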
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> List[str]:
logger.info('lv, h >\t' + '\t'.join(F"""{x + 1}""" for x in range(len(SCREAMING_SNAKE_CASE ) ) ) )
for row in range(len(SCREAMING_SNAKE_CASE ) ):
if tensor.dtype != torch.long:
logger.info(F"""layer {row + 1}:\t""" + '\t'.join(F"""{x:.5f}""" for x in tensor[row].cpu().data ) )
else:
logger.info(F"""layer {row + 1}:\t""" + '\t'.join(F"""{x:d}""" for x in tensor[row].cpu().data ) )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Dict=False ) -> List[str]:
__lowercase , __lowercase = model.config.num_hidden_layers, model.config.num_attention_heads
__lowercase = torch.zeros(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).to(args.device )
__lowercase = torch.zeros(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).to(args.device )
if head_mask is None:
__lowercase = torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).to(args.device )
head_mask.requires_grad_(requires_grad=SCREAMING_SNAKE_CASE )
    # If the heads were actually pruned (rather than just masked), set the head
    # mask to None to avoid a shape mismatch
if actually_pruned:
__lowercase = None
__lowercase = 0.0
__lowercase = 0.0
for step, inputs in enumerate(tqdm(SCREAMING_SNAKE_CASE , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
__lowercase = tuple(t.to(args.device ) for t in inputs )
((__lowercase) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
__lowercase = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , head_mask=SCREAMING_SNAKE_CASE )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
__lowercase , __lowercase , __lowercase = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(SCREAMING_SNAKE_CASE ):
__lowercase = entropy(attn.detach() , SCREAMING_SNAKE_CASE )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(SCREAMING_SNAKE_CASE ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
__lowercase = 2
__lowercase = torch.pow(torch.pow(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
__lowercase = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(SCREAMING_SNAKE_CASE )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(SCREAMING_SNAKE_CASE )
logger.info('Head ranked by importance scores' )
__lowercase = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
__lowercase = torch.arange(
head_importance.numel() , device=args.device )
__lowercase = head_ranks.view_as(SCREAMING_SNAKE_CASE )
print_ad_tensor(SCREAMING_SNAKE_CASE )
return attn_entropy, head_importance, total_loss
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict ) -> str:
__lowercase , __lowercase , __lowercase = compute_heads_importance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , compute_entropy=SCREAMING_SNAKE_CASE )
__lowercase = 1 / loss # instead of downsteam score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , SCREAMING_SNAKE_CASE , original_score * args.masking_threshold )
__lowercase = torch.ones_like(SCREAMING_SNAKE_CASE )
__lowercase = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
__lowercase = original_score
while current_score >= original_score * args.masking_threshold:
__lowercase = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
__lowercase = float('Inf' )
__lowercase = head_importance.view(-1 ).sort()[1]
if len(SCREAMING_SNAKE_CASE ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
__lowercase = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
__lowercase = new_head_mask.view(-1 )
__lowercase = 0.0
__lowercase = new_head_mask.view_as(SCREAMING_SNAKE_CASE )
__lowercase = new_head_mask.clone().detach()
print_ad_tensor(SCREAMING_SNAKE_CASE )
# Compute metric and head importance again
__lowercase , __lowercase , __lowercase = compute_heads_importance(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , compute_entropy=SCREAMING_SNAKE_CASE , head_mask=SCREAMING_SNAKE_CASE )
__lowercase = 1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percents)' , SCREAMING_SNAKE_CASE , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info('Final head mask' )
print_ad_tensor(SCREAMING_SNAKE_CASE )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple ) -> Any:
__lowercase = datetime.now()
__lowercase , __lowercase , __lowercase = compute_heads_importance(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , compute_entropy=SCREAMING_SNAKE_CASE , compute_importance=SCREAMING_SNAKE_CASE , head_mask=SCREAMING_SNAKE_CASE )
__lowercase = 1 / loss
__lowercase = datetime.now() - before_time
__lowercase = sum(p.numel() for p in model.parameters() )
__lowercase = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(SCREAMING_SNAKE_CASE ) )
}
for k, v in heads_to_prune.items():
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowercase = [
v,
]
assert sum(len(SCREAMING_SNAKE_CASE ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(SCREAMING_SNAKE_CASE )
__lowercase = sum(p.numel() for p in model.parameters() )
__lowercase = datetime.now()
__lowercase , __lowercase , __lowercase = compute_heads_importance(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , compute_entropy=SCREAMING_SNAKE_CASE , compute_importance=SCREAMING_SNAKE_CASE , head_mask=SCREAMING_SNAKE_CASE , actually_pruned=SCREAMING_SNAKE_CASE , )
__lowercase = 1 / loss
__lowercase = datetime.now() - before_time
logger.info(
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , pruned_num_params / original_num_params * 100 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 100 )
save_model(SCREAMING_SNAKE_CASE , args.output_dir )
def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=SCREAMING_SNAKE_CASE , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=SCREAMING_SNAKE_CASE , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=SCREAMING_SNAKE_CASE , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=SCREAMING_SNAKE_CASE , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=SCREAMING_SNAKE_CASE , help='Amount to heads to masking at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=SCREAMING_SNAKE_CASE , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=128 , type=SCREAMING_SNAKE_CASE , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=SCREAMING_SNAKE_CASE , help='Batch size.' )
parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE , default=42 )
parser.add_argument('--local_rank' , type=SCREAMING_SNAKE_CASE , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' )
__lowercase = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
__lowercase = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
__lowercase = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
__lowercase = torch.device('cuda' , args.local_rank )
__lowercase = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
__lowercase = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
__lowercase = nn.parallel.DistributedDataParallel(
SCREAMING_SNAKE_CASE , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=SCREAMING_SNAKE_CASE )
elif args.n_gpu > 1:
__lowercase = nn.DataParallel(SCREAMING_SNAKE_CASE )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=SCREAMING_SNAKE_CASE )
torch.save(SCREAMING_SNAKE_CASE , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , SCREAMING_SNAKE_CASE )
# Prepare dataset
__lowercase = np.concatenate(
[
            np.loadtxt(args.data_dir , dtype=np.int64 ),  # dtype assumed: np.intaa is not a real NumPy dtype
] )
__lowercase = (torch.from_numpy(SCREAMING_SNAKE_CASE ),)
__lowercase = TensorDataset(*SCREAMING_SNAKE_CASE )
__lowercase = RandomSampler(SCREAMING_SNAKE_CASE )
__lowercase = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
__lowercase = mask_heads(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
prune_heads(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
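# Example invocation (illustrative; the script name, data file, and flag values
# are placeholders. The data file is expected to hold whitespace-separated
# token ids loadable by np.loadtxt, as read above):
#   python run_prune_gpt2.py \
#       --model_name_or_path gpt2 \
#       --data_dir ./token_ids.txt \
#       --output_dir ./pruned_gpt2 \
#       --try_masking --masking_threshold 0.9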
| 688 |
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> int:
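    """
    Return the `number`-th Catalan number (1-indexed: 1, 1, 2, 5, 14, ...),
    computed iteratively via the recurrence C(n) = C(n-1) * (4n - 2) / (n + 1).

    Illustrative doctests (verified against the implementation below):

    >>> __SCREAMING_SNAKE_CASE(1)
    1
    >>> __SCREAMING_SNAKE_CASE(5)
    14
    """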
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowercase = F"""Input value of [number={number}] must be an integer"""
raise TypeError(SCREAMING_SNAKE_CASE )
if number < 1:
__lowercase = F"""Input value of [number={number}] must be > 0"""
raise ValueError(SCREAMING_SNAKE_CASE )
__lowercase = 1
for i in range(1 , SCREAMING_SNAKE_CASE ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 688 | 1 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict ) -> Any:
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
__lowercase = TapasConfig.from_json_file(SCREAMING_SNAKE_CASE )
# set absolute/relative position embeddings parameter
__lowercase = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "WTQ":
# run_task_main.py hparams
__lowercase = 4
__lowercase = True
# hparam_utils.py hparams
__lowercase = 0.664_694
__lowercase = 0.207_951
__lowercase = 0.121_194
__lowercase = True
__lowercase = True
__lowercase = False
__lowercase = 0.0_352_513
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
__lowercase = 4
__lowercase = False
# hparam_utils.py hparams
__lowercase = 36.4_519
__lowercase = 0.903_421
__lowercase = 222.088
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = 0.763_141
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "TABFACT":
__lowercase = TapasForSequenceClassification(config=SCREAMING_SNAKE_CASE )
elif task == "MLM":
__lowercase = TapasForMaskedLM(config=SCREAMING_SNAKE_CASE )
elif task == "INTERMEDIATE_PRETRAINING":
__lowercase = TapasModel(config=SCREAMING_SNAKE_CASE )
else:
raise ValueError(F"""Task {task} not supported.""" )
print(F"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model (weights and configuration)
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
# Save tokenizer files
print(F"""Save tokenizer files to {pytorch_dump_path}""" )
__lowercase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt' , model_max_length=512 )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
print('Used relative position embeddings:' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 688 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def __SCREAMING_SNAKE_CASE ( ) -> List[str]:
__lowercase = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
__lowercase = parser.add_subparsers(help='diffusers-cli command helpers' )
# Register commands
EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE )
# Let's go
__lowercase = parser.parse_args()
if not hasattr(SCREAMING_SNAKE_CASE , 'func' ):
parser.print_help()
exit(1 )
# Run
__lowercase = args.func(SCREAMING_SNAKE_CASE )
service.run()
if __name__ == "__main__":
main()
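# Example invocation (illustrative): with only EnvironmentCommand registered
# above, the single available subcommand prints environment diagnostics:
#   $ diffusers-cli env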
| 688 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/config.json""",
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Dict = "xglm"
lowerCAmelCase__ : str = ["past_key_values"]
lowerCAmelCase__ : Tuple = {
"num_attention_heads": "attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "num_layers",
}
def __init__( self : Union[str, Any] , _UpperCAmelCase : Tuple=25_60_08 , _UpperCAmelCase : Dict=20_48 , _UpperCAmelCase : Tuple=10_24 , _UpperCAmelCase : Dict=40_96 , _UpperCAmelCase : Dict=24 , _UpperCAmelCase : List[str]=16 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : Any=0.02 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : Tuple=1 , _UpperCAmelCase : Union[str, Any]=0 , _UpperCAmelCase : int=2 , **_UpperCAmelCase : str , ) -> List[str]:
"""simple docstring"""
__lowercase = vocab_size
__lowercase = max_position_embeddings
__lowercase = d_model
__lowercase = ffn_dim
__lowercase = num_layers
__lowercase = attention_heads
__lowercase = activation_function
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = layerdrop
__lowercase = init_std
__lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
__lowercase = use_cache
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
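# Hedged usage sketch (not part of the original file; the public class name
# XGLMConfig is an assumption based on the `model_type` above, since the class
# here is obfuscated to A__):
#   cfg = XGLMConfig()
#   cfg.num_attention_heads  # resolved via attribute_map -> attention_heads (16)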
| 688 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : List[str] = ProphetNetTokenizer
lowerCAmelCase__ : str = False
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
super().setUp()
__lowercase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def a__ ( self : str , _UpperCAmelCase : Any ) -> List[str]:
"""simple docstring"""
__lowercase = 'UNwant\u00E9d,running'
__lowercase = 'unwanted, running'
return input_text, output_text
def a__ ( self : Any ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [9, 6, 7, 12, 10, 11] )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Dict ) -> str:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : str ) -> Dict:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def a__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__lowercase = {}
for i, token in enumerate(_UpperCAmelCase ):
__lowercase = i
__lowercase = WordpieceTokenizer(vocab=_UpperCAmelCase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def a__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__lowercase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__lowercase = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
__lowercase = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='pt' )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def a__ ( self : int ) -> Dict:
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def a__ ( self : Any ) -> List[str]:
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__lowercase = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCAmelCase )
__lowercase = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
assert encoded_sentence == text + [1_02]
assert encoded_pair == text + [1_02] + text_a + [1_02]
| 688 | 1 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str ) -> Any:
monkeypatch.setattr('datasets.utils.deprecation_utils._emitted_deprecation_warnings' , set() )
@pytest.fixture
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple ) -> Dict:
class A__ :
def __init__( self : Optional[int] , _UpperCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = metric_id
class A__ :
lowerCAmelCase__ : int = [MetricMock(lowerCAmelCase__ ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]
def a__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return self._metrics
monkeypatch.setattr('datasets.inspect.huggingface_hub' , HfhMock() )
@pytest.mark.parametrize(
'func, args' , [(load_metric, ('metrics/mse',)), (list_metrics, ()), (inspect_metric, ('metrics/mse', 'tmp_path'))] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple ) -> Tuple:
if "tmp_path" in args:
__lowercase = tuple(arg if arg != 'tmp_path' else tmp_path for arg in args )
with pytest.warns(SCREAMING_SNAKE_CASE , match='https://huggingface.co/docs/evaluate' ):
func(*SCREAMING_SNAKE_CASE )
| 688 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
SCREAMING_SNAKE_CASE__ = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""jukebox""": 512,
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
lowerCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ : Optional[Any] = PRETRAINED_LYRIC_TOKENS_SIZES
lowerCAmelCase__ : Any = ["input_ids", "attention_mask"]
def __init__( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=["v3", "v2", "v2"] , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Union[str, Any]="<|endoftext|>" , **_UpperCAmelCase : Tuple , ) -> List[Any]:
"""simple docstring"""
__lowercase = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
super().__init__(
unk_token=_UpperCAmelCase , n_genres=_UpperCAmelCase , version=_UpperCAmelCase , max_n_lyric_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = version
__lowercase = max_n_lyric_tokens
__lowercase = n_genres
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
__lowercase = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
        # In v2 the character vocabulary had n_vocab=80; v3 dropped the '+' character, so n_vocab=79.
if len(self.lyrics_encoder ) == 79:
__lowercase = oov.replace(R'\-\'' , R'\-+\'' )
__lowercase = regex.compile(_UpperCAmelCase )
__lowercase = {v: k for k, v in self.artists_encoder.items()}
__lowercase = {v: k for k, v in self.genres_encoder.items()}
__lowercase = {v: k for k, v in self.lyrics_encoder.items()}
@property
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
        return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = [self.artists_encoder.get(_UpperCAmelCase , 0 ) for artist in list_artists]
for genres in range(len(_UpperCAmelCase ) ):
__lowercase = [self.genres_encoder.get(_UpperCAmelCase , 0 ) for genre in list_genres[genres]]
__lowercase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
__lowercase = [[self.lyrics_encoder.get(_UpperCAmelCase , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def a__ ( self : str , _UpperCAmelCase : str ) -> Tuple:
"""simple docstring"""
return list(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase , __lowercase = self.prepare_for_tokenization(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = self._tokenize(_UpperCAmelCase )
return artist, genre, lyrics
def a__ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : bool = False ) -> Tuple[str, str, str, Dict[str, Any]]:
"""simple docstring"""
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
__lowercase = artists[idx].lower()
__lowercase = [genres[idx].lower()]
else:
__lowercase = self._normalize(artists[idx] ) + '.v2'
__lowercase = [
self._normalize(_UpperCAmelCase ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
__lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
__lowercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
__lowercase = {vocab[index]: index + 1 for index in range(len(_UpperCAmelCase ) )}
__lowercase = 0
__lowercase = len(_UpperCAmelCase ) + 1
__lowercase = self.vocab
__lowercase = {v: k for k, v in self.vocab.items()}
__lowercase = ''
else:
__lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
__lowercase = self._run_strip_accents(_UpperCAmelCase )
__lowercase = lyrics.replace('\\' , '\n' )
__lowercase = self.out_of_vocab.sub('' , _UpperCAmelCase ), [], []
return artists, genres, lyrics
def a__ ( self : Tuple , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = unicodedata.normalize('NFD' , _UpperCAmelCase )
__lowercase = []
for char in text:
__lowercase = unicodedata.category(_UpperCAmelCase )
if cat == "Mn":
continue
output.append(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : str ) -> str:
"""simple docstring"""
__lowercase = (
[chr(_UpperCAmelCase ) for i in range(ord('a' ) , ord('z' ) + 1 )]
+ [chr(_UpperCAmelCase ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
+ [chr(_UpperCAmelCase ) for i in range(ord('0' ) , ord('9' ) + 1 )]
+ ['.']
)
__lowercase = frozenset(_UpperCAmelCase )
__lowercase = re.compile(R'_+' )
__lowercase = ''.join([c if c in accepted else '_' for c in text.lower()] )
__lowercase = pattern.sub('_' , _UpperCAmelCase ).strip('_' )
return text
def a__ ( self : List[str] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
return " ".join(_UpperCAmelCase )
def a__ ( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = False ) -> int:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = TensorType(_UpperCAmelCase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
__lowercase = tf.constant
__lowercase = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
__lowercase = torch.tensor
__lowercase = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
__lowercase = jnp.array
__lowercase = _is_jax
else:
__lowercase = np.asarray
__lowercase = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
__lowercase = [inputs]
if not is_tensor(_UpperCAmelCase ):
__lowercase = as_tensor(_UpperCAmelCase )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : int="" , _UpperCAmelCase : Tuple="pt" ) -> BatchEncoding:
"""simple docstring"""
__lowercase = [0, 0, 0]
__lowercase = [artist] * len(self.version )
__lowercase = [genres] * len(self.version )
__lowercase , __lowercase , __lowercase = self.tokenize(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase , __lowercase , __lowercase = self._convert_token_to_id(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = [-INFINITY] * len(full_tokens[-1] )
__lowercase = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=_UpperCAmelCase )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def a__ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=_UpperCAmelCase ) )
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=_UpperCAmelCase ) )
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=_UpperCAmelCase ) )
return (artists_file, genres_file, lyrics_file)
def a__ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.artists_decoder.get(_UpperCAmelCase )
__lowercase = [self.genres_decoder.get(_UpperCAmelCase ) for genre in genres_index]
__lowercase = [self.lyrics_decoder.get(_UpperCAmelCase ) for character in lyric_index]
return artist, genres, lyrics
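

# Hedged usage sketch of the tokenizer above (published in transformers as
# JukeboxTokenizer; this dump renames it). The checkpoint name is assumed;
# one input_ids tensor is returned per entry in `version` ("v3", "v2", "v2").
if __name__ == "__main__":
    from transformers import JukeboxTokenizer

    tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' )
    inputs = tokenizer('Alan Jackson' , 'Country Rock' , 'old town road' )
    print(len(inputs['input_ids'] ) )  # 3, one per model version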
| 688 | 1 |
import math
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : list , SCREAMING_SNAKE_CASE : int ) -> int:
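    # Jump ahead in blocks of size floor(sqrt(n)) until the end of a block
    # passes x, then scan linearly inside that block: O(sqrt(n)) comparisons
    # on sorted input.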
__lowercase = len(SCREAMING_SNAKE_CASE )
__lowercase = int(math.floor(math.sqrt(SCREAMING_SNAKE_CASE ) ) )
__lowercase = 0
while arr[min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) - 1] < x:
__lowercase = step
step += int(math.floor(math.sqrt(SCREAMING_SNAKE_CASE ) ) )
if prev >= n:
return -1
while arr[prev] < x:
__lowercase = prev + 1
if prev == min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = input("""Enter numbers separated by a comma:\n""").strip()
SCREAMING_SNAKE_CASE__ = [int(item) for item in user_input.split(""",""")]
SCREAMING_SNAKE_CASE__ = int(input("""Enter the number to be searched:\n"""))
SCREAMING_SNAKE_CASE__ = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
print(F'''Number {x} is at index {res}''')
| 688 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=13 , _UpperCAmelCase : Any=7 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : List[Any]=36 , _UpperCAmelCase : Optional[Any]=6 , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : Any=6 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Dict=5_12 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Any=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = embedding_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_hidden_groups
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = AlbertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ) -> Tuple:
"""simple docstring"""
__lowercase = AlbertForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , sentence_order_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def a__ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AlbertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = AlbertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = AlbertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : int = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : Dict = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : Optional[Any] = True
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : int=False ) -> Tuple:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase )
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = AlbertModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
@slow
def a__ ( self : int ) -> Any:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AlbertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = AlbertModel.from_pretrained('albert-base-v2' )
__lowercase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
__lowercase = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , _UpperCAmelCase )
__lowercase = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) )
| 688 | 1 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
class A__ ( lowerCAmelCase__ ):
def __init__( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict=None ) -> int:
"""simple docstring"""
super().__init__(
_UpperCAmelCase , question_encoder_tokenizer=_UpperCAmelCase , generator_tokenizer=_UpperCAmelCase , index=_UpperCAmelCase , init_retrieval=_UpperCAmelCase , )
__lowercase = None
def a__ ( self : Tuple , _UpperCAmelCase : int ) -> Any:
"""simple docstring"""
logger.info('initializing retrieval' )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info('dist initialized' )
# needs to be set manually
__lowercase = self._infer_socket_ifname()
# avoid clash with the NCCL port
__lowercase = str(distributed_port + 1 )
__lowercase = dist.new_group(ranks=_UpperCAmelCase , backend='gloo' )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info('dist not initialized / main' )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
return dist.get_rank(group=self.process_group ) == 0
def a__ ( self : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any]=torch.floataa ) -> int:
"""simple docstring"""
__lowercase = torch.empty(_UpperCAmelCase , dtype=_UpperCAmelCase )
dist.scatter(_UpperCAmelCase , src=0 , scatter_list=_UpperCAmelCase , group=self.process_group )
return target_tensor
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
        __lowercase = next((addr for addr in addrs if addr.startswith('e' )) , None )
return ifname
def a__ ( self : Optional[Any] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : int ) -> Tuple[np.ndarray, List[dict]]:
"""simple docstring"""
if not dist.is_initialized():
__lowercase , __lowercase = self._main_retrieve(_UpperCAmelCase , _UpperCAmelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_UpperCAmelCase )
# distributed training
__lowercase = dist.get_world_size(group=self.process_group )
# gather logic
__lowercase = None
if self._is_main():
__lowercase = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(_UpperCAmelCase )]
dist.gather(torch.tensor(_UpperCAmelCase ) , dst=0 , gather_list=_UpperCAmelCase , group=self.process_group )
# scatter logic
__lowercase = question_hidden_states.shape[0]
__lowercase = []
__lowercase = []
if self._is_main():
assert len(_UpperCAmelCase ) == world_size
__lowercase , __lowercase = self._main_retrieve(torch.cat(_UpperCAmelCase ).numpy() , _UpperCAmelCase )
__lowercase , __lowercase = torch.tensor(_UpperCAmelCase ), torch.tensor(_UpperCAmelCase )
__lowercase = self._chunk_tensor(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = self._chunk_tensor(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = self._scattered(_UpperCAmelCase , [n_queries, n_docs] , target_type=torch.intaa )
__lowercase = self._scattered(_UpperCAmelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(_UpperCAmelCase )
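

# Standalone sketch of the split-then-scatter bookkeeping in retrieve() above:
# the main worker concatenates the gathered per-rank query batches, retrieves
# once, then chunks the results back into one tensor per rank. All sizes here
# are made up for illustration.
def _chunking_sketch() -> None:
    world_size, n_queries, n_docs, dim = 2, 3, 5, 8
    gathered = [torch.randn(n_queries , dim ) for _ in range(world_size )]
    all_queries = torch.cat(gathered )                        # (world_size * n_queries, dim)
    doc_scores = torch.randn(all_queries.shape[0] , n_docs )  # stand-in for index output
    per_rank = doc_scores.split(n_queries )                   # one (n_queries, n_docs) chunk per rank
    assert len(per_rank ) == world_size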
| 688 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class A__ ( lowerCAmelCase__ ):
def __init__( self : List[str] , _UpperCAmelCase : str = "▁" , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[str, AddedToken] = "<unk>" , _UpperCAmelCase : Union[str, AddedToken] = "</s>" , _UpperCAmelCase : Union[str, AddedToken] = "<pad>" , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
__lowercase = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
__lowercase = token_dict['token']
__lowercase = Tokenizer(Unigram() )
__lowercase = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}' ) , ' ' ),
normalizers.Lowercase(),
] )
__lowercase = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ),
pre_tokenizers.Digits(individual_digits=_UpperCAmelCase ),
pre_tokenizers.Punctuation(),
] )
__lowercase = decoders.Metaspace(replacement=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__lowercase = TemplateProcessing(
single=f"""$A {self.special_tokens["eos"]["token"]}""" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
__lowercase = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : Union[str, List[str]] , _UpperCAmelCase : int = 80_00 , _UpperCAmelCase : bool = True , ) -> str:
"""simple docstring"""
__lowercase = trainers.UnigramTrainer(
vocab_size=_UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=_UpperCAmelCase , )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [files]
self._tokenizer.train(_UpperCAmelCase , trainer=_UpperCAmelCase )
self.add_unk_id()
def a__ ( self : int , _UpperCAmelCase : Union[Iterator[str], Iterator[Iterator[str]]] , _UpperCAmelCase : int = 80_00 , _UpperCAmelCase : bool = True , ) -> Dict:
"""simple docstring"""
__lowercase = trainers.UnigramTrainer(
vocab_size=_UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=_UpperCAmelCase , )
self._tokenizer.train_from_iterator(_UpperCAmelCase , trainer=_UpperCAmelCase )
self.add_unk_id()
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = json.loads(self._tokenizer.to_str() )
__lowercase = self.special_tokens['unk']['id']
__lowercase = Tokenizer.from_str(json.dumps(_UpperCAmelCase ) )
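

# Minimal, self-contained check of the training recipe above, driving the
# tokenizers library directly (the wrapper class and its methods are renamed
# by this dump, so they are bypassed here). The toy corpus and vocab size are
# illustrative only.
if __name__ == "__main__":
    raw = Tokenizer(Unigram() )
    trainer = trainers.UnigramTrainer(
        vocab_size=60 , special_tokens=['<pad>', '</s>', '<unk>'] , show_progress=False )
    raw.train_from_iterator(
        ['this is a sentence', 'this is another sentence', 'tokenizers are fun'] , trainer=trainer )
    print(raw.encode('this is a sentence' ).tokens )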
| 688 | 1 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
__lowercase = AutoTokenizer.from_pretrained('xlm-roberta-base' )
__lowercase = 'The dog is cute and lives in the garden house'
__lowercase = jnp.array([tokenizer.encode(_UpperCAmelCase )] )
__lowercase = (1, 12, 7_68) # batch_size, sequence_length, embedding_vector_dim
__lowercase = jnp.array(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
__lowercase = model(_UpperCAmelCase )['last_hidden_state']
self.assertEqual(output.shape , _UpperCAmelCase )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , _UpperCAmelCase , atol=1e-3 ) )
| 688 |
import string
from math import logaa
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> int:
__lowercase = document.translate(
str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' )
__lowercase = document_without_punctuation.split(' ' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> tuple[int, int]:
__lowercase = corpus.lower().translate(
str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with ''
__lowercase = corpus_without_punctuation.split('\n' )
__lowercase = term.lower()
return (len([doc for doc in docs if term in doc] ), len(SCREAMING_SNAKE_CASE ))
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str]=False ) -> float:
if smoothing:
if n == 0:
raise ValueError('log10(0) is undefined.' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('df must be > 0' )
elif n == 0:
raise ValueError('log10(0) is undefined.' )
return round(logaa(n / df ) , 3 )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> float:
return round(tf * idf , 3 )
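

# Worked example, assuming the four helpers keep their original names
# (term_frequency, document_frequency, inverse_document_frequency, tf_idf);
# this dump collapses them all onto one obfuscated name, so the calls are
# shown as comments rather than executed.
#
#   term_frequency('document', 'this document is the second document')       # -> 2
#   document_frequency('document', 'doc one\ndocument two\ndocument three')  # -> (2, 3)
#   inverse_document_frequency(3, 2)   # -> 0.176, i.e. round(log10(3 / 2), 3)
#   tf_idf(2, 0.176)                   # -> 0.352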
| 688 | 1 |
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> int:
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowercase = F"""Input value of [number={number}] must be an integer"""
raise TypeError(SCREAMING_SNAKE_CASE )
if number < 1:
__lowercase = F"""Input value of [number={number}] must be > 0"""
raise ValueError(SCREAMING_SNAKE_CASE )
__lowercase = 1
for i in range(1 , SCREAMING_SNAKE_CASE ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
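

# Worked check of the recurrence above: the loop applies
# C(i) = C(i-1) * (4*i - 2) // (i + 1), which is exact at every step, so the
# function returns the n-th Catalan number with 1-based indexing:
# 1, 1, 2, 5, 14, 42, ...
# For n=5: 1 -> 1*2//2=1 -> 1*6//3=2 -> 2*10//4=5 -> 5*14//5=14.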
| 688 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# TODO: upload to AWS
SCREAMING_SNAKE_CASE__ = {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"""
),
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = "retribert"
def __init__( self : Optional[Any] , _UpperCAmelCase : Dict=3_05_22 , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : List[Any]=8 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Union[str, Any]=30_72 , _UpperCAmelCase : Optional[int]="gelu" , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Tuple=5_12 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : List[Any]=1e-1_2 , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[Any]=1_28 , _UpperCAmelCase : Optional[int]=0 , **_UpperCAmelCase : Union[str, Any] , ) -> Tuple:
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = share_encoders
__lowercase = projection_dim
| 688 | 1 |
from math import isqrt, loga
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> list[int]:
__lowercase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowercase = False
return [i for i in range(2 , SCREAMING_SNAKE_CASE ) if is_prime[i]]
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int = 800800 , SCREAMING_SNAKE_CASE : int = 800800 ) -> int:
__lowercase = degree * loga(SCREAMING_SNAKE_CASE )
__lowercase = int(SCREAMING_SNAKE_CASE )
__lowercase = calculate_prime_numbers(SCREAMING_SNAKE_CASE )
__lowercase = 0
__lowercase = 0
__lowercase = len(SCREAMING_SNAKE_CASE ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F'''{solution() = }''')
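    # Sanity check of the log-space predicate used in the while-loop above:
    # p**q * q**p <= N  <=>  q*log2(p) + p*log2(q) <= log2(N).
    # For p=2, q=3: p**q * q**p = 72 and 3*log2(2) + 2*log2(3) = log2(72) (about 6.17).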
| 688 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
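# The _LazyModule assignment above defers the heavy (torch-dependent) imports:
# submodules are only imported the first time one of their attributes, e.g.
# FalconModel, is accessed, which keeps `import transformers` fast even when
# optional backends are absent.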
| 688 | 1 |