from typing import Callable, List, Optional, Tuple, Union

import torch
from transformers import CLIPTextModel, CLIPTokenizer

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Truncates `log_p_x_0` such that, per column vector, the kept classes cover at least `truncation_rate`
        cumulative probability; the remaining low probabilities are zeroed out (set to `-inf` in log space).
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        # map the sorted mask back to the original class ordering
        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
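# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module). The checkpoint
# name below is an assumption; any VQ-Diffusion checkpoint with this layout
# should behave the same way:
#
#     from diffusers import VQDiffusionPipeline
#
#     pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#     image = pipe("teddy bear playing in the pool", num_inference_steps=100).images[0]
#     image.save("teddy_bear.png")
# ---------------------------------------------------------------------------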
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            # mask padding positions with -100 so they are ignored by the loss
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
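        # Illustrative follow-up (an assumption, not in the original test):
        # with `predict_with_generate=True`, calling `evaluate()` generates
        # summaries that `_compute_metrics` scores, and the Trainer prefixes
        # the resulting keys with "eval_":
        #
        #     metrics = trainer.evaluate()
        #     assert "eval_accuracy" in metrics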
"""simple docstring"""
import numpy as np
from PIL import Image
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
A__ = np.array(UpperCamelCase__ )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
A__ = 0
A__ = 0
A__ = 0
A__ = 0
# compute the shape of the output matrix
A__ = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
A__ = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
A__ = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
A__ = 0
A__ = 0
return updated_arr
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
A__ = np.array(UpperCamelCase__ )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
A__ = 0
A__ = 0
A__ = 0
A__ = 0
# compute the shape of the output matrix
A__ = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
A__ = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
A__ = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
A__ = 0
A__ = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="avgpooling", verbose=True)
# Loading the image
__lowerCamelCase = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
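# Worked example (illustrative): on a 4x4 input with size=2 and stride=2, each
# function slides a 2x2 window two pixels at a time and reduces the matrix to 2x2:
#
#     >>> arr = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
#     >>> maxpooling(arr, size=2, stride=2)
#     array([[ 6.,  8.],
#            [14., 16.]])
#     >>> avgpooling(arr, size=2, stride=2)  # averages are truncated to int
#     array([[ 3.,  5.],
#            [11., 13.]])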
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS


enable_full_determinism()


class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=6,
            out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldm3d_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldm3d_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)

        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2


@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = depth[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3


@nightly
@require_torch_gpu
class StableDiffusionPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
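# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the test suite): the pipeline under
# test returns an RGB image and a depth map from a single prompt:
#
#     pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to("cuda")
#     out = pipe("a photograph of an astronaut riding a horse")
#     rgb, depth = out.rgb, out.depth
# ---------------------------------------------------------------------------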
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation


def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def create_rename_keys(config):
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"))

        if i < 3:
            rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))

    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on


def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-swin-tiny",
        type=str,
        choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
        help="Name of the Swin + UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
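# Example invocation (illustrative; the script file name is an assumption
# based on the model family being converted):
#
#     python convert_swin_upernet_to_pytorch.py \
#         --model_name upernet-swin-tiny \
#         --pytorch_dump_folder_path ./upernet-swin-tiny \
#         --push_to_hub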
import datasets


_CITATION = """\
@InProceedings{conneau2018xnli,
  author = "Conneau, Alexis
        and Rinott, Ruty
        and Lample, Guillaume
        and Williams, Adina
        and Bowman, Samuel R.
        and Schwenk, Holger
        and Stoyanov, Veselin",
  title = "XNLI: Evaluating Cross-lingual Sentence Representations",
  booktitle = "Proceedings of the 2018 Conference on Empirical Methods
               in Natural Language Processing",
  year = "2018",
  publisher = "Association for Computational Linguistics",
  location = "Brussels, Belgium",
}
"""

_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""

_KWARGS_DESCRIPTION = """
Computes XNLI score which is just simple accuracy.
Args:
    predictions: Predicted labels.
    references: Ground truth labels.
Returns:
    'accuracy': accuracy
Examples:

    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> xnli_metric = datasets.load_metric("xnli")
    >>> results = xnli_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}
"""


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCamelCase_ ( a_ ):
def __init__( self , snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=snake_case__ , unet=snake_case__ , scheduler=snake_case__ )
@torch.no_grad()
def __call__( self , snake_case__ = 1 , snake_case__ = None , snake_case__ = 0.0 , snake_case__ = 50 , snake_case__ = "pil" , snake_case__ = True , **snake_case__ , ) -> Union[Tuple, ImagePipelineOutput]:
"""simple docstring"""
UpperCAmelCase = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=snake_case__ , )
UpperCAmelCase = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(snake_case__ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
UpperCAmelCase = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase = {}
if accepts_eta:
UpperCAmelCase = eta
for t in self.progress_bar(self.scheduler.timesteps ):
UpperCAmelCase = self.scheduler.scale_model_input(snake_case__ , snake_case__ )
# predict the noise residual
UpperCAmelCase = self.unet(snake_case__ , snake_case__ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase = self.scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
# decode the image latents with the VAE
UpperCAmelCase = self.vqvae.decode(snake_case__ ).sample
UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase = self.numpy_to_pil(snake_case__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case__ )
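# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module). The checkpoint
# name is an assumption; any unconditional latent-diffusion checkpoint with a
# VQ-VAE, UNet and DDIM-style scheduler works the same way:
#
#     from diffusers import LDMPipeline
#
#     pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#     image = pipe(num_inference_steps=50).images[0]
#     image.save("sample.png")
# ---------------------------------------------------------------------------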
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCamelCase_ ( a_ ):
def __init__( self , *snake_case__ , snake_case__=None , snake_case__=None , **snake_case__ ) -> Optional[Any]:
"""simple docstring"""
super().__init__(*snake_case__ , **snake_case__ )
UpperCAmelCase = eval_examples
UpperCAmelCase = post_process_function
def UpperCamelCase_ ( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__ = "eval" ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.eval_dataset if eval_dataset is None else eval_dataset
UpperCAmelCase = self.get_eval_dataloader(snake_case__ )
UpperCAmelCase = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase = self.compute_metrics
UpperCAmelCase = None
UpperCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
UpperCAmelCase = time.time()
try:
UpperCAmelCase = eval_loop(
snake_case__ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=snake_case__ , metric_key_prefix=snake_case__ , )
finally:
UpperCAmelCase = compute_metrics
UpperCAmelCase = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
snake_case__ , snake_case__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
UpperCAmelCase = self.post_process_function(snake_case__ , snake_case__ , output.predictions )
UpperCAmelCase = self.compute_metrics(snake_case__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
UpperCAmelCase = metrics.pop(snake_case__ )
metrics.update(output.metrics )
else:
UpperCAmelCase = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(snake_case__ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
UpperCAmelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , snake_case__ )
return metrics
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__ = "test" ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.get_test_dataloader(snake_case__ )
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase = self.compute_metrics
UpperCAmelCase = None
UpperCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
UpperCAmelCase = time.time()
try:
UpperCAmelCase = eval_loop(
snake_case__ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=snake_case__ , metric_key_prefix=snake_case__ , )
finally:
UpperCAmelCase = compute_metrics
UpperCAmelCase = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
snake_case__ , snake_case__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
UpperCAmelCase = self.post_process_function(snake_case__ , snake_case__ , output.predictions , """predict""" )
UpperCAmelCase = self.compute_metrics(snake_case__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
UpperCAmelCase = metrics.pop(snake_case__ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=snake_case__ )
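# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): wiring the
# trainer into a SQuAD-style script. `postprocess_qa_predictions` and the
# dataset variables are placeholders for whatever the surrounding script defines:
#
#     trainer = QuestionAnsweringTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         post_process_function=postprocess_qa_predictions,
#         compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate()
# ---------------------------------------------------------------------------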
"""simple docstring"""
lowerCAmelCase : Dict = 256
# Modulus to hash a string
lowerCAmelCase : str = 100_0003
def a__ ( snake_case__ , snake_case__ ) -> bool:
lowerCamelCase = len(snake_case__ )
lowerCamelCase = len(snake_case__ )
if p_len > t_len:
return False
lowerCamelCase = 0
lowerCamelCase = 0
lowerCamelCase = 1
# Calculating the hash of pattern and substring of text
for i in range(snake_case__ ):
lowerCamelCase = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
lowerCamelCase = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
lowerCamelCase = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
lowerCamelCase = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def a__ ( ) -> None:
lowerCamelCase = """abc1abc12"""
lowerCamelCase = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
lowerCamelCase = """alskfjaldsk23adsfabcabc"""
assert rabin_karp(snake_case__ , snake_case__ ) and not rabin_karp(snake_case__ , snake_case__ )
# Test 2)
lowerCamelCase = """ABABX"""
lowerCamelCase = """ABABZABABYABABX"""
assert rabin_karp(snake_case__ , snake_case__ )
# Test 3)
lowerCamelCase = """AAAB"""
lowerCamelCase = """ABAAAAAB"""
assert rabin_karp(snake_case__ , snake_case__ )
# Test 4)
lowerCamelCase = """abcdabcy"""
lowerCamelCase = """abcxabcdabxabcdabcdabcy"""
assert rabin_karp(snake_case__ , snake_case__ )
# Test 5)
lowerCamelCase = """Lü"""
lowerCamelCase = """Lüsai"""
assert rabin_karp(snake_case__ , snake_case__ )
lowerCamelCase = """Lue"""
assert not rabin_karp(snake_case__ , snake_case__ )
print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
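# Worked example of the rolling-hash update (illustrative). With base
# alphabet_size and window length m, sliding the window one character to the
# right drops the leading character and appends the next one:
#
#     new_hash = ((old_hash - ord(dropped) * alphabet_size**(m - 1)) * alphabet_size
#                 + ord(appended)) % modulus
#
# which is exactly what the second loop above computes; `modulus_power` caches
# alphabet_size**(m - 1) % modulus so no exponentiation happens per step.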
"""simple docstring"""
from math import ceil
def a__ ( snake_case__ , snake_case__ ) -> Optional[int]:
lowerCamelCase = list(range(0 , snake_case__ ) )
lowerCamelCase = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
lowerCamelCase = []
for i in device_map_blocks:
if device_map_blocks.count(snake_case__ ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(snake_case__ )
# Missing blocks
lowerCamelCase = [i for i in blocks if i not in device_map_blocks]
lowerCamelCase = [i for i in device_map_blocks if i not in blocks]
if len(snake_case__ ) != 0:
raise ValueError(
"""Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
""" These attention blocks were specified more than once: """ + str(snake_case__ ) )
if len(snake_case__ ) != 0:
raise ValueError(
"""There are attention blocks for this model that are not specified in the device_map. Add these attention """
"""blocks to a device on the device_map: """ + str(snake_case__ ) )
if len(snake_case__ ) != 0:
raise ValueError(
"""The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
+ str(snake_case__ ) )
def a__ ( snake_case__ , snake_case__ ) -> List[Any]:
lowerCamelCase = list(range(snake_case__ ) )
lowerCamelCase = int(ceil(n_layers / len(snake_case__ ) ) )
lowerCamelCase = [layers[i : i + n_blocks] for i in range(0 , snake_case__ , snake_case__ )]
return dict(zip(snake_case__ , snake_case__ ) )
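# Worked example (illustrative): spreading a 12-layer model over 3 devices
# assigns 4 consecutive layers per device:
#
#     >>> get_device_map(12, [0, 1, 2])
#     {0: [0, 1, 2, 3], 1: [4, 5, 6, 7], 2: [8, 9, 10, 11]}
#
# `assert_device_map` then validates a (possibly hand-written) map against the
# model's actual layer count, flagging duplicated, missing, or extra blocks.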
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}


class CpmTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer with jieba pre-tokenization, as used by CPM models."""

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # the SentencePiece processor is not picklable; reload it in __setstate__
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        # undo the jieba pre-tokenization: strip spaces and restore the
        # placeholder characters for space (\u2582) and newline (\u2583)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
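# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module):
#
#     from transformers import CpmTokenizer
#
#     tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#     ids = tokenizer.encode("你好,世界")
#     text = tokenizer.decode(ids)  # `_decode` restores spaces and newlines
# ---------------------------------------------------------------------------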
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a graph on `vertices_number` vertices where each possible edge is added with `probability`."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is smaller than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add an edge from j to i
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete graph on `vertices_number` vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }


if __name__ == "__main__":
    import doctest

    doctest.testmod()
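# Usage sketch (illustrative): an undirected graph stores each edge in both
# adjacency lists, a directed one only in the source vertex's list:
#
#     graph = random_graph(5, 0.5)                      # undirected
#     digraph = random_graph(5, 0.5, directed=True)     # directed
#     dense = complete_graph(4)  # {0: [1, 2, 3], 1: [0, 2, 3], 2: [0, 1, 3], 3: [0, 1, 2]}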
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_inverse_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
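
# Hypothetical invocation (flag names follow the InitializationArguments
# dataclass used above; the script filename is an assumption):
#
#     python initialize_model.py --config_name gpt2-large \
#         --tokenizer_name codeparrot/codeparrot \
#         --model_name codeparrot-gpt2-large --push_to_hub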
| 150 | """simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """Logistic sigmoid, mapping any real-valued score to a probability in (0, 1)."""
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    """Binary cross-entropy between predicted probabilities h and true labels y."""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    """Log-likelihood of the labels under a logistic model with the given weights."""
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70_000):
    """Fit logistic-regression weights theta by batch gradient descent."""
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
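
# The loop above is plain batch gradient descent on the cross-entropy loss;
# for logistic regression the gradient reduces to x.T @ (h - y) / m, so a
# single hand-rolled step looks like (illustrative sketch):
#
#     h = sigmoid_function(x @ theta)   # predictions, shape (m,)
#     grad = x.T @ (h - y) / y.size     # d(loss)/d(theta), shape (n,)
#     theta = theta - alpha * grad      # step against the gradient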
# In[68]:
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = datasets.load_iris()
SCREAMING_SNAKE_CASE__ = iris.data[:, :2]
SCREAMING_SNAKE_CASE__ = (iris.target != 0) * 1
SCREAMING_SNAKE_CASE__ = 0.1
SCREAMING_SNAKE_CASE__ = logistic_reg(alpha, x, y, max_iterations=70_000)
print("theta: ", theta) # printing the theta i.e our weights vector
def lowerCAmelCase__ ( _UpperCamelCase : List[str] ) -> List[Any]:
"""simple docstring"""
return sigmoid_function(
np.dot(_UpperCamelCase , _UpperCamelCase ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = (x[:, 0].min(), x[:, 0].max())
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = (x[:, 1].min(), x[:, 1].max())
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
SCREAMING_SNAKE_CASE__ = np.c_[xxa.ravel(), xxa.ravel()]
SCREAMING_SNAKE_CASE__ = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
| 150 | 1 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self) -> None:
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs) -> CTRLTokenizer:
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) | 33 |
import os
# Precompute a list of the first 100 triangular numbers t(n) = n * (n + 1) / 2
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """Count the words in words.txt whose word value (the sum of the
    alphabetical positions of its letters) is a triangular number."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    word_values = [sum(ord(x) - 64 for x in word) for word in words]
    triangular_words = [value for value in word_values if value in TRIANGULAR_NUMBERS]
    return len(triangular_words)
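
# Worked example (illustrative): "SKY" has word value 19 + 11 + 25 = 55, and
# 55 = 10 * 11 / 2 is the tenth triangular number, so "SKY" is counted.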
if __name__ == "__main__":
print(solution()) | 33 | 1 |
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
lowercase__ = '1'
lowercase__ = '0'
lowercase__ = '1'
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
# BERT-style ONNX models take int64 token/mask inputs
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print("""Warm up phase...""")
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("""Start inference...""")
start_time = time.time()
max_iters = 2000
outputs = {}
for iter in range(max_iters):
    outputs = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("""Average Inference Time = {:.3f} ms""".format((time.time() - start_time) * 1000 / max_iters)) | 96 |
import math
def proth(number: int) -> int:
    """Return the `number`-th Proth number (numbers of the form k * 2**n + 1
    with k odd and k < 2**n): 3, 5, 9, 13, 17, 25, ..."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # number of doubling "blocks" needed before index `number` is reached
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]
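
# Quick check (illustrative): Proth numbers have the form k * 2**n + 1 with k
# odd and k < 2**n, so the sequence starts 3, 5, 9, 13, 17, 25, 33, ... and
# e.g. proth(3) == 9 because 9 = 1 * 2**3 + 1.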
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
__snake_case : Optional[int] =0
try:
__snake_case : List[Any] =proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""")
| 129 | 0 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    """Return every subset of `nums` whose elements sum to `max_sum`."""
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """Depth-first search over the subset tree, pruning branches that can no
    longer reach `max_sum`."""
    # prune: the current path already overshoots, or even taking every
    # remaining number cannot reach max_sum
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )
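
# For nums = [3, 34, 4, 12, 5, 2] and max_sum = 9 (the driver below), the tree
# yields exactly the subsets [3, 4, 2] and [4, 5]. The first pruning condition
# cuts any branch whose partial sum already exceeds max_sum; the second cuts
# branches where even taking every remaining number cannot reach it.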
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 368 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class __lowerCAmelCase :
def __init__( self : Any , A : str = "cpu" , A : str = "openai/clip-vit-large-patch14") -> None:
"""simple docstring"""
_UpperCAmelCase = device
_UpperCAmelCase = CLIPTokenizerFast.from_pretrained(A)
_UpperCAmelCase = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]
_UpperCAmelCase = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]
_UpperCAmelCase = torchvision.transforms.Normalize(self.image_mean , self.image_std)
_UpperCAmelCase = torchvision.transforms.Resize(2_24)
_UpperCAmelCase = torchvision.transforms.CenterCrop(2_24)
def _lowerCamelCase ( self : str , A : Any) -> str:
"""simple docstring"""
_UpperCAmelCase = self.resize(A)
_UpperCAmelCase = self.center_crop(A)
_UpperCAmelCase = self.normalize(A)
return images
def __call__( self : Any , A : Dict=None , A : Dict=None , **A : List[Any]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = self.tokenizer(text=A , **A)
_UpperCAmelCase = self.preprocess_img(A)
_UpperCAmelCase = {key: value.to(self.device) for (key, value) in encoding.items()}
return encoding
class __lowerCAmelCase ( nn.Module ):
def __init__( self : List[Any] , A : Any=10 , A : List[Any]=0.0_1 , A : Optional[int]=None , A : int=None , A : Dict=None , A : Tuple=None , A : str=None , A : Dict=None , A : Union[str, Any]=False , A : Any=True , A : Any="image" , A : Tuple=True , A : List[Any]=False , A : int=False , A : int=False , ) -> None:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = None
_UpperCAmelCase = device if device else get_device()
if vqgan:
_UpperCAmelCase = vqgan
else:
_UpperCAmelCase = load_vqgan(self.device , conf_path=A , ckpt_path=A)
self.vqgan.eval()
if clip:
_UpperCAmelCase = clip
else:
_UpperCAmelCase = CLIPModel.from_pretrained('openai/clip-vit-base-patch32')
self.clip.to(self.device)
_UpperCAmelCase = ProcessorGradientFlow(device=self.device)
_UpperCAmelCase = iterations
_UpperCAmelCase = lr
_UpperCAmelCase = log
_UpperCAmelCase = make_grid
_UpperCAmelCase = return_val
_UpperCAmelCase = quantize
_UpperCAmelCase = self.vqgan.decoder.z_shape
def _lowerCamelCase ( self : Optional[int] , A : int=None , A : Union[str, Any]=None , A : Dict=5 , A : Optional[Any]=True) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = []
if output_path is None:
_UpperCAmelCase = './animation.gif'
if input_path is None:
_UpperCAmelCase = self.save_path
_UpperCAmelCase = sorted(glob(input_path + '/*'))
if not len(A):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)')
if len(A) == 1:
print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)')
_UpperCAmelCase = total_duration / len(A)
_UpperCAmelCase = [frame_duration] * len(A)
if extend_frames:
_UpperCAmelCase = 1.5
_UpperCAmelCase = 3
for file_name in paths:
if file_name.endswith('.png'):
images.append(imageio.imread(A))
imageio.mimsave(A , A , duration=A)
print(F"gif saved to {output_path}")
def _lowerCamelCase ( self : List[str] , A : Optional[Any]=None , A : Optional[int]=None) -> int:
"""simple docstring"""
if not (path or img):
raise ValueError('Input either path or tensor')
if img is not None:
raise NotImplementedError
_UpperCAmelCase = preprocess(Image.open(A) , target_image_size=2_56).to(self.device)
_UpperCAmelCase = preprocess_vqgan(A)
_UpperCAmelCase , *_UpperCAmelCase = self.vqgan.encode(A)
return z
def _lowerCamelCase ( self : List[str] , A : int) -> Dict:
"""simple docstring"""
_UpperCAmelCase = self.latent.detach().requires_grad_()
_UpperCAmelCase = base_latent + transform_vector
if self.quantize:
_UpperCAmelCase , *_UpperCAmelCase = self.vqgan.quantize(A)
else:
_UpperCAmelCase = trans_latent
return self.vqgan.decode(A)
def _lowerCamelCase ( self : Any , A : Dict , A : Dict , A : Optional[Any]=None) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.clip_preprocessor(text=A , images=A , return_tensors='pt' , padding=A)
_UpperCAmelCase = self.clip(**A)
_UpperCAmelCase = clip_outputs.logits_per_image
if weights is not None:
_UpperCAmelCase = similarity_logits * weights
return similarity_logits.sum()
def _lowerCamelCase ( self : Optional[int] , A : Dict , A : int , A : Tuple) -> str:
"""simple docstring"""
_UpperCAmelCase = self._get_clip_similarity(pos_prompts['prompts'] , A , weights=(1 / pos_prompts['weights']))
if neg_prompts:
_UpperCAmelCase = self._get_clip_similarity(neg_prompts['prompts'] , A , weights=neg_prompts['weights'])
else:
_UpperCAmelCase = torch.tensor([1] , device=self.device)
_UpperCAmelCase = -torch.log(A) + torch.log(A)
return loss
def _lowerCamelCase ( self : Tuple , A : Optional[int] , A : List[Any] , A : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = torch.randn_like(self.latent , requires_grad=A , device=self.device)
_UpperCAmelCase = torch.optim.Adam([vector] , lr=self.lr)
for i in range(self.iterations):
optim.zero_grad()
_UpperCAmelCase = self._add_vector(A)
_UpperCAmelCase = loop_post_process(A)
_UpperCAmelCase = self._get_CLIP_loss(A , A , A)
print('CLIP loss' , A)
if self.log:
wandb.log({'CLIP Loss': clip_loss})
clip_loss.backward(retain_graph=A)
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0])
else:
yield vector
def _lowerCamelCase ( self : Dict , A : Any , A : Optional[int] , A : str) -> Any:
"""simple docstring"""
wandb.init(reinit=A , project='face-editor')
wandb.config.update({'Positive Prompts': positive_prompts})
wandb.config.update({'Negative Prompts': negative_prompts})
wandb.config.update({'lr': self.lr, 'iterations': self.iterations})
if image_path:
_UpperCAmelCase = Image.open(A)
_UpperCAmelCase = image.resize((2_56, 2_56))
wandb.log('Original Image' , wandb.Image(A))
def _lowerCamelCase ( self : Dict , A : int) -> Dict:
"""simple docstring"""
if not prompts:
return []
_UpperCAmelCase = []
_UpperCAmelCase = []
if isinstance(A , A):
_UpperCAmelCase = [prompt.strip() for prompt in prompts.split('|')]
for prompt in prompts:
if isinstance(A , (tuple, list)):
_UpperCAmelCase = prompt[0]
_UpperCAmelCase = float(prompt[1])
elif ":" in prompt:
_UpperCAmelCase , _UpperCAmelCase = prompt.split(':')
_UpperCAmelCase = float(A)
else:
_UpperCAmelCase = prompt
_UpperCAmelCase = 1.0
processed_prompts.append(A)
weights.append(A)
return {
"prompts": processed_prompts,
"weights": torch.tensor(A , device=self.device),
}
def _lowerCamelCase ( self : Optional[int] , A : Union[str, Any] , A : Union[str, Any]=None , A : int=None , A : Optional[Any]=True , A : Dict=False , A : Union[str, Any]=True , A : Any=True , A : Any=None , ) -> Dict:
"""simple docstring"""
if image_path:
_UpperCAmelCase = self._get_latent(A)
else:
_UpperCAmelCase = torch.randn(self.latent_dim , device=self.device)
if self.log:
self._init_logging(A , A , A)
assert pos_prompts, "You must provide at least one positive prompt."
_UpperCAmelCase = self.process_prompts(A)
_UpperCAmelCase = self.process_prompts(A)
if save_final and save_path is None:
_UpperCAmelCase = os.path.join('./outputs/' , '_'.join(pos_prompts['prompts']))
if not os.path.exists(A):
os.makedirs(A)
else:
_UpperCAmelCase = save_path + '_' + get_timestamp()
os.makedirs(A)
_UpperCAmelCase = save_path
_UpperCAmelCase = self.vqgan.decode(self.latent)[0]
if show_intermediate:
print('Original Image')
show_pil(custom_to_pil(A))
_UpperCAmelCase = loop_post_process(A)
for iter, transformed_img in enumerate(self._optimize_CLIP(A , A , A)):
if show_intermediate:
show_pil(A)
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F"iter_{iter:03d}.png"))
if self.log:
wandb.log({'Image': wandb.Image(A)})
if show_final:
show_pil(A)
if save_final:
transformed_img.save(os.path.join(self.save_path , F"iter_{iter:03d}_final.png"))
| 290 | 0 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False,
) -> None:
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names,
            dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format,
            enable_onnx_checker=True, opset_version=opset,
        )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names,
            dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False) -> None:
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()

    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
| 116 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=13, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=99, lowerCamelCase__=32, lowerCamelCase__=5, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=512, lowerCamelCase__=16, lowerCamelCase__=2, lowerCamelCase__=0.02, lowerCamelCase__=4, ):
A : List[str] = parent
A : Optional[int] = batch_size
A : Union[str, Any] = seq_length
A : Any = is_training
A : List[str] = use_attention_mask
A : Union[str, Any] = use_token_type_ids
A : Any = use_labels
A : str = vocab_size
A : Union[str, Any] = hidden_size
A : str = num_hidden_layers
A : List[Any] = num_attention_heads
A : Optional[int] = intermediate_size
A : Optional[Any] = hidden_act
A : Dict = hidden_dropout_prob
A : List[Any] = attention_probs_dropout_prob
A : Optional[int] = max_position_embeddings
A : int = type_vocab_size
A : str = type_sequence_label_size
A : List[Any] = initializer_range
A : str = num_choices
def _lowerCAmelCase ( self ):
A : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : Union[str, Any] = None
if self.use_attention_mask:
A : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
A : int = None
if self.use_token_type_ids:
A : List[Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
A : Optional[int] = AlbertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase__, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
def _lowerCAmelCase ( self ):
A : Dict = self.prepare_config_and_inputs()
A , A , A , A : str = config_and_inputs
A : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Any = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCAmelCase ( self ):
A : Dict = FlaxAlbertModelTester(self )
@slow
def _lowerCAmelCase ( self ):
for model_class_name in self.all_model_classes:
A : Dict = model_class_name.from_pretrained("""albert-base-v2""" )
A : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ):
A : Dict = FlaxAlbertModel.from_pretrained("""albert-base-v2""" )
A : List[str] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
A : str = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
A : Optional[int] = model(lowerCamelCase__, attention_mask=lowerCamelCase__ )[0]
A : str = (1, 11, 768)
self.assertEqual(output.shape, lowerCamelCase__ )
A : Optional[int] = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], lowerCamelCase__, atol=1e-4 ) )
| 116 | 1 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny", "roberta", checkpoint_path, precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion, fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
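
# Illustrative example of the renaming (the key below is made up): a checkpoint
# key such as "audio_branch.something.sequential.3.weight" first has
# "audio_branch" rewritten to "audio_model.audio_encoder" via
# KEYS_TO_MODIFY_MAPPING, and "sequential.3." then collapses to
# "layers.1.linear." because 3 // 3 == 1.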
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 211 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self, vocab_size=250880, hidden_size=2560, num_hidden_layers=36, num_attention_heads=32,
        intermediate_size=10240, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-05,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute",
        use_cache=True, classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 211 | 1 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """Expected number of distinct colours when `taken` balls are drawn at
    random from an urn of 70 balls, 10 in each of 7 colours."""
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
| 182 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def A__ ( UpperCamelCase = "laptop" ):
A = F"https://www.amazon.in/laptop/s?k={product}"
A = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
A = BeautifulSoup(requests.get(UpperCamelCase , headers=UpperCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
A = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
A = item.ha.text
A = "https://www.amazon.in/" + item.ha.a["href"]
A = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
A = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
A = "Not available"
try:
A = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
A = ""
try:
A = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
A = float("nan" )
except AttributeError:
pass
A = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
A = " "
A = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
_snake_case : Optional[int] = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 292 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0,
        drop_path_rate=0.1, use_layerscale=False, layerscale_value=1e-4, use_post_layernorm=False,
        use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02,
        layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 366 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
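
# Example (illustrative): with dims = (2, 3), flat index 5 maps to (1, 2),
# since 5 = 1 * 3 + 2; the loop peels off the innermost dimension first and
# the final reversal restores the original dimension order.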
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
slices.extend(lower() )
return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
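
# In effect, _chunk_slice treats the leading `no_batch_dims` dimensions as one
# flattened batch axis, carves out rows [flat_start, flat_end) with as few
# contiguous slices as possible, and concatenates them, avoiding a full
# reshape/copy of the input tensor.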
def chunk_layer(layer: Callable, inputs: Dict[str, Any], chunk_size: int, no_batch_dims: int, low_mem: bool = False, _out: Any = None, _add_into_out: bool = False) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None
    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4
        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False
        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2
return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent
    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we cannot reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(representative_fn, args, min_chunk_size)
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
| 177 | 0 |
import datasets
from .evaluate import evaluate
_UpperCAmelCase : Union[str, Any] = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
_UpperCAmelCase : Any = """
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
_UpperCAmelCase : Any = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {'id': datasets.Value('string' ), 'prediction_text': datasets.Value('string' )},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 285 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
a__ = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 317 | 0 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True if `number` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add the three fractions x_num/x_den + y_num/y_den + z_num/z_den and
    reduce the result to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
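
# Worked example (illustrative): add_three(1, 2, 1, 3, 1, 6) returns (1, 1),
# since 1/2 + 1/3 + 1/6 = 36/36 reduces to 1/1.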
def solution(order: int = 35) -> int:
    """Collect all reduced fractions z_num/z_den bounded by `order` that arise
    from the four exponent cases below, sum the distinct triples, and return
    denominator + numerator of the reduced total."""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""")
| 299 |
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'new-model'
if is_tf_available():
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = NewModelConfig
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
@slow
    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        model_name = '''bert-base-cased'''
        config = AutoConfig.from_pretrained(model_name )
        self.assertIsNotNone(config )
        self.assertIsInstance(config , BertConfig )
        model = TFAutoModel.from_pretrained(model_name )
        self.assertIsNotNone(model )
        self.assertIsInstance(model , TFBertModel )
    @slow
    def lowercase_ ( self : int ):
        '''simple docstring'''
        model_name = '''bert-base-cased'''
        config = AutoConfig.from_pretrained(model_name )
        self.assertIsNotNone(config )
        self.assertIsInstance(config , BertConfig )
        model = TFAutoModelForPreTraining.from_pretrained(model_name )
        self.assertIsNotNone(model )
        self.assertIsInstance(model , TFBertForPreTraining )
    @slow
    def lowercase_ ( self : int ):
        '''simple docstring'''
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , GPT2Config )
            model = TFAutoModelForCausalLM.from_pretrained(model_name )
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name , output_loading_info=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFGPT2LMHeadModel )
    @slow
    def lowercase_ ( self : List[Any] ):
        '''simple docstring'''
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelWithLMHead.from_pretrained(model_name )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForMaskedLM )
    @slow
    def lowercase_ ( self : Optional[Any] ):
        '''simple docstring'''
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForMaskedLM.from_pretrained(model_name )
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name , output_loading_info=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForMaskedLM )
    @slow
    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , T5Config )
            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name )
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name , output_loading_info=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFT5ForConditionalGeneration )
    @slow
    def lowercase_ ( self : Any ):
        '''simple docstring'''
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForSequenceClassification.from_pretrained(model_name )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForSequenceClassification )
    @slow
    def lowercase_ ( self : Any ):
        '''simple docstring'''
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForQuestionAnswering )
    @slow
    @require_tensorflow_probability
    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , TapasConfig )
            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name )
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name , output_loading_info=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFTapasForQuestionAnswering )
    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER )
        self.assertIsInstance(model , TFBertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 )
    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
        self.assertIsInstance(model , TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 )
    def lowercase_ ( self : Dict ):
        '''simple docstring'''
        model = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
        self.assertIsInstance(model , TFFunnelModel )
        config = copy.deepcopy(model.config )
        config.architectures = ['''FunnelBaseModel''']
        model = TFAutoModel.from_config(config )
        self.assertIsInstance(model , TFFunnelBaseModel )
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir )
            model = TFAutoModel.from_pretrained(tmp_dir )
            self.assertIsInstance(model , TFFunnelBaseModel )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
try:
            AutoConfig.register('''new-model''' , NewModelConfig )
            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
                # Wrong config class will raise an error
                with self.assertRaises(ValueError ):
                    auto_class.register(BertConfig , TFNewModel )
                auto_class.register(NewModelConfig , TFNewModel )
                # Trying to register something existing in the Transformers library will raise an error
                with self.assertRaises(ValueError ):
                    auto_class.register(BertConfig , TFBertModel )
                # Now that the config is registered, it can be used as any other config with the auto-API
                tiny_config = BertModelTester(self ).get_config()
                config = NewModelConfig(**tiny_config.to_dict() )
                model = auto_class.from_config(config )
                self.assertIsInstance(model , TFNewModel )
with tempfile.TemporaryDirectory() as tmp_dir:
                    model.save_pretrained(tmp_dir )
                    new_model = auto_class.from_pretrained(tmp_dir )
                    self.assertIsInstance(new_model , TFNewModel )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
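    # Minimal sketch of the registration pattern exercised above (shown for
    # illustration only, not part of the test suite): once a config/model pair
    # is registered, the auto classes resolve "new-model" like any built-in
    # architecture:
    #
    #     AutoConfig.register("new-model", NewModelConfig)
    #     TFAutoModel.register(NewModelConfig, TFNewModel)
    #     model = TFAutoModel.from_config(NewModelConfig())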
def lowercase_ ( self : str ):
'''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , '''bert-base is not a local folder and is not a valid model identifier''' ):
            _ = TFAutoModel.from_pretrained('''bert-base''' )
    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            _ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='''aaaaaa''' )
    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ):
            _ = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        with self.assertRaisesRegex(EnvironmentError , '''Use `from_pt=True` to load this model''' ):
            _ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
UpperCAmelCase__ : Union[str, Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
UpperCAmelCase__ : Optional[Any] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
with RequestCounter() as counter:
UpperCAmelCase__ : List[Any] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 299 | 1 |
def price_plus_tax(price: float , tax_rate: float ) -> float:
    """simple docstring"""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(F'''{price_plus_tax(1_00, 0.25) = }''')
print(F'''{price_plus_tax(125.50, 0.05) = }''')
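# A rounded variant for currency display (an illustrative addition, not part
# of the original snippet):
#
#     def price_plus_tax_rounded(price: float, tax_rate: float) -> float:
#         return round(price_plus_tax(price, tax_rate), 2)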
| 142 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name ) -> UperNetConfig:
    """simple docstring"""
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = '''huggingface/label-files'''
    filename = '''ade20k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths , hidden_sizes=hidden_sizes , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
    return config
def create_rename_keys(config ) -> list:
    """simple docstring"""
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") )
rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") )
rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") )
if i > 0:
rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight") )
rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias") )
rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight") )
rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct , old , new ) -> None:
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def convert_upernet_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub ) -> None:
    """simple docstring"""
    model_name_to_url = {
'''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
'''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
'''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
'''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
'''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''state_dict''']
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
# replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace('''bn''' , '''batch_norm''' )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    model.load_state_dict(state_dict )
# verify on image
    url = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors='''pt''' ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
if model_name == "upernet-convnext-tiny":
lowerCamelCase__ : Any = torch.tensor(
[[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] )
elif model_name == "upernet-convnext-small":
lowerCamelCase__ : List[str] = torch.tensor(
[[-8.82_36, -8.82_36, -8.67_71], [-8.82_36, -8.82_36, -8.67_71], [-8.76_38, -8.76_38, -8.62_40]] )
elif model_name == "upernet-convnext-base":
lowerCamelCase__ : str = torch.tensor(
[[-8.85_58, -8.85_58, -8.69_05], [-8.85_58, -8.85_58, -8.69_05], [-8.76_69, -8.76_69, -8.60_21]] )
elif model_name == "upernet-convnext-large":
lowerCamelCase__ : Optional[int] = torch.tensor(
[[-8.66_60, -8.66_60, -8.62_10], [-8.66_60, -8.66_60, -8.62_10], [-8.63_10, -8.63_10, -8.59_64]] )
elif model_name == "upernet-convnext-xlarge":
lowerCamelCase__ : Tuple = torch.tensor(
[[-8.49_80, -8.49_80, -8.39_77], [-8.49_80, -8.49_80, -8.39_77], [-8.43_79, -8.43_79, -8.34_12]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCAmelCase , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"Saving processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(f"Pushing model and processor for {model_name} to hub" )
model.push_to_hub(f"openmmlab/{model_name}" )
processor.push_to_hub(f"openmmlab/{model_name}" )
if __name__ == "__main__":
_A : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[F'''upernet-convnext-{size}''' for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_A : Tuple = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
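# Example invocation (the script filename and output path are illustrative):
#   python convert_upernet_convnext_checkpoint.py --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny --push_to_hub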
| 142 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput( ModelOutput ):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig( XLMRobertaConfig ):
    def __init__( self , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , project_dim=5_1_2 , pooler_fn="cls" , learn_encoder=False , use_attention_mask=True , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation( RobertaPreTrainedModel ):
    _keys_to_ignore_on_load_unexpected = [R"pooler", R"logit_scale"]
    _keys_to_ignore_on_load_missing = [R"position_ids", R"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig
    def __init__( self , config ):
        """simple docstring"""
        super().__init__(config )
        self.roberta = XLMRobertaModel(config )
        self.transformation = nn.Linear(config.hidden_size , config.project_dim )
        self.has_pre_transformation = getattr(config , '''has_pre_transformation''' , False )
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size , config.project_dim )
            self.pre_LN = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()
    def forward( self , input_ids: Optional[torch.Tensor] = None , attention_mask: Optional[torch.Tensor] = None , token_type_ids: Optional[torch.Tensor] = None , position_ids: Optional[torch.Tensor] = None , head_mask: Optional[torch.Tensor] = None , inputs_embeds: Optional[torch.Tensor] = None , encoder_hidden_states: Optional[torch.Tensor] = None , encoder_attention_mask: Optional[torch.Tensor] = None , output_attentions: Optional[bool] = None , return_dict: Optional[bool] = None , output_hidden_states: Optional[bool] = None , ):
        """simple docstring"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_attentions=output_attentions , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=return_dict , )
        if self.has_pre_transformation:
            sequence_output_pre = outputs['''hidden_states'''][-2]
            sequence_output_pre = self.pre_LN(sequence_output_pre )
            projection_state = self.transformation_pre(sequence_output_pre )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            projection_state = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
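# Minimal usage sketch (checkpoint-free; shapes shown are illustrative and
# not part of the original module):
#
#     config = RobertaSeriesConfig(project_dim=768)
#     model = RobertaSeriesModelWithTransformation(config)
#     out = model(input_ids=torch.tensor([[0, 35378, 2]]))
#     print(out.projection_state.shape)  # (1, 3, 768)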
| 369 |
"""simple docstring"""
from maths.prime_factors import prime_factors
def liouville_lambda(number: int ) -> int:
    '''simple docstring'''
    if not isinstance(number , int ):
        msg = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 1:
        raise ValueError('''Input must be a positive integer''' )
    return -1 if len(prime_factors(number ) ) % 2 else 1
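# Worked example: 12 = 2 * 2 * 3 has three prime factors counted with
# multiplicity (an odd count), so the function returns -1; 10 = 2 * 5 has
# two, so it returns 1. This parity rule is the Liouville lambda function.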
if __name__ == "__main__":
import doctest
doctest.testmod()
| 175 | 0 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str , bert_config_file: str , pytorch_dump_path: str) -> None:
    """simple docstring"""
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(F'''Building PyTorch model from configuration: {config}''')
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
_lowercase : Tuple =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowercase : Optional[int] =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
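# Example invocation (checkpoint paths are illustrative):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin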
| 170 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key , default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''')
    return _value
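# Example: with RUN_SLOW=yes in the environment, parse_flag_from_env("RUN_SLOW")
# returns True; if the variable is unset, the `default` argument is returned.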
_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
_run_remote_tests = parse_flag_from_env('''RUN_REMOTE''', default=False)
_run_local_tests = parse_flag_from_env('''RUN_LOCAL''', default=True)
_run_packaged_tests = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
    reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
    reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('''0.3.2'''),
    reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == '''win32''',
    reason='''test should not be run on Windows''',
)
def require_faiss(test_case):
    try:
        import faiss # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case
def require_regex(test_case):
    try:
        import regex # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case
def require_elasticsearch(test_case):
    try:
        import elasticsearch # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case
def require_sqlalchemy(test_case):
    try:
        import sqlalchemy # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case
def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case
def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case
def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case
def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case
def require_transformers(test_case):
    try:
        import transformers # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case
def require_tiktoken(test_case):
    try:
        import tiktoken # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case
def require_spacy(test_case):
    try:
        import spacy # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case
def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy # noqa F401
            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case
    return _require_spacy_model
def require_pyspark(test_case):
    try:
        import pyspark # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case
def require_joblibspark(test_case):
    try:
        import joblibspark # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case
def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case
def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case
def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case
def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls , name , fn)
        return cls
    return decorate
class RequestWouldHangIndefinitelyError( Exception ):
    pass
class OfflineSimulationMode( Enum ):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS , timeout=1E-16):
    online_request = requests.Session().request
    def timeout_request(session , method , url , **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''')
        kwargs["timeout"] = timeout
        try:
            return online_request(method , invalid_url , **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1" , f'''OfflineMock[{url}]'''),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session , prepared_request , **kwargs):
        raise requests.ConnectionError("Offline mode is enabled." , request=prepared_request)
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send" , raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request" , timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE" , True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args , **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args , **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1 , rng2):
    return deepcopy(rng1).integers(0 , 100 , 10).tolist() == deepcopy(rng2).integers(0 , 100 , 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper(func , *args , **kwargs):
        try:
            return func(*args , **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err
    return decorator.decorator(_wrapper , func)
class _RunOutput:
    def __init__( self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream , callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: " , " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label , line , file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda line: tee(line , out , sys.stdout , label="stdout:")),
            _read_stream(p.stderr , lambda line: tee(line , err , sys.stderr , label="stderr:")),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err)
def execute_subprocess_async(cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo))
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''')
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f'''\'{cmd_str}\' produced no output.''')
    return result
def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER" , "gw0")
    worker = re.sub(R"^gw" , "" , worker , 0 , re.M)
    return int(worker)
def get_torch_dist_unique_port():
    port = 2_9500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
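# Example: pytest-xdist worker "gw3" yields 29500 + 3 = 29503, so each worker
# gets a distinct torch.distributed port and parallel runs don't collide.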
| 87 | 0 |
def matching_min_vertex_cover(graph: dict ) -> set:
    """simple docstring"""
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph )
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices
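# Note: this greedy, matching-based procedure is the classic 2-approximation
# for minimum vertex cover: both endpoints of every selected edge are taken,
# so the returned cover is at most twice the size of an optimal one.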
def get_edges(graph: dict ) -> set:
    """simple docstring"""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 357 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
snake_case_ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class VisualQuestionAnsweringPipeline( Pipeline ):
    def __init__( self , *args , **kwargs):
        """simple docstring"""
        super().__init__(*args , **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)
    def _sanitize_parameters( self , top_k=None , padding=None , truncation=None , **kwargs):
        """simple docstring"""
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params['''padding'''] = padding
        if truncation is not None:
            preprocess_params['''truncation'''] = truncation
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , image: Union["Image.Image", str] , question: str = None , **kwargs):
        """simple docstring"""
        if isinstance(image , (Image.Image, str)) and isinstance(question , str):
            inputs = {'''image''': image, '''question''': question}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs)
        return results
    def preprocess( self , inputs , padding=False , truncation=False):
        """simple docstring"""
        image = load_image(inputs['''image'''])
        model_inputs = self.tokenizer(
            inputs['''question'''] , return_tensors=self.framework , padding=padding , truncation=truncation)
        image_features = self.image_processor(images=image , return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs
    def _forward( self , model_inputs):
        """simple docstring"""
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess( self , model_outputs , top_k=5):
        """simple docstring"""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids)]
| 7 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = """<s>"""
__a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<unk>""" )
        self.assertEqual(vocab_keys[1] , """<s>""" )
        self.assertEqual(vocab_keys[-1] , """j""" )
        self.assertEqual(len(vocab_keys ) , 1000 )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
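        # The three comparisons above check slow/fast tokenizer parity at the
        # token level, at the id level without special tokens, and at the id
        # level with the default special-token handling.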
    def UpperCamelCase_ ( self : List[str] , max_length: int = 15 ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = """This is a simple input"""
                s2 = ["""This is a simple input 1""", """This is a simple input 2"""]
                p = ("""This is a simple input""", """This is a pair""")
                p2 = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="""max_length""" )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="""max_length""" )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding="""max_length""" , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="""max_length""" )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="""max_length""" )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding="""max_length""" , )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""" )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = """Hello World!"""
__a = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(__lowercase , self.big_tokenizer.encode(__lowercase ) )
@slow
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
        symbols = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = """ """.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="""pt""" )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="""pt""" )
        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["""input_ids"""].shape
        model = ReformerModel(config )
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
# fmt: off
__a = {"""input_ids""": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            """This is a very simple sentence.""",
            """The quick brown fox jumps over the lazy dog.""",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="""google/reformer-crime-and-punishment""" , revision="""0e6c3decb8211d49bf881013425dc8b0448b3f5a""" , padding=False , sequences=sequences , )
| 302 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__( self , do_resize: bool = True , size: Optional[Dict[str, int]] = None , resample: PILImageResampling = PILImageResampling.BICUBIC , do_center_crop: bool = True , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , crop_size: Dict[str, int] = None , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
__a = size if size is not None else {"""height""": 224, """width""": 224}
__a = get_size_dict(__lowercase )
__a = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__a = get_size_dict(__lowercase , default_to_square=__lowercase , param_name="""crop_size""" )
__a = do_resize
__a = do_rescale
__a = do_normalize
__a = do_center_crop
__a = crop_size
__a = size
__a = resample
__a = rescale_factor
__a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BILINEAR , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size )
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image , size=size["""shortest_edge"""] , default_to_square=False )
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["""height"""], size["""width"""])
        else:
            raise ValueError(F"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" )
        return resize(image , size=size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: float , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: Optional[bool] = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: int = None , do_rescale: Optional[bool] = None , rescale_factor: Optional[float] = None , do_normalize: Optional[bool] = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" , default_to_square=True )
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        if not is_batched(images ):
            images = [images]
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
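# Usage sketch (the concrete processor class name above was lost in
# extraction; any subclass of BaseImageProcessor works the same way):
#
#     processor = <ImageProcessor>(size={"height": 224, "width": 224})
#     batch = processor(images=pil_image, return_tensors="pt")
#     batch["pixel_values"].shape  # e.g. (1, 3, 224, 224)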
| 302 | 1 |
def solution() -> int:
    '''simple docstring'''
    constant = []
    i = 1
    while len(constant ) < 1E6:
        constant.append(str(i ) )
        i += 1
    constant = ''''''.join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
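# The digits joined above form Champernowne's constant (0.123456789101112...);
# the product multiplies its 1st, 10th, 100th, ..., 1,000,000th digits.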
if __name__ == "__main__":
print(solution())
| 97 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x ):
    '''simple docstring'''
    exp_x = torch.exp(x )
    A = torch.sum(exp_x , dim=1 ) # sum of exp(x_i)
    B = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
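# Why this equals the entropy of softmax(x): with p_i = exp(x_i) / A,
# H(p) = -sum_i p_i * log(p_i) = -sum_i p_i * (x_i - log(A))
#      = log(A) - (sum_i x_i * exp(x_i)) / A = log(A) - B / A.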
class DeeBertEncoder( nn.Module ):
    def __init__( self , config ):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config ) for _ in range(config.num_hidden_layers )])
        self.highway = nn.ModuleList([BertHighway(config ) for _ in range(config.num_hidden_layers )])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers )]
    def set_early_exit_entropy( self , x ):
        if (type(x ) is float) or (type(x ) is int):
            for i in range(len(self.early_exit_entropy )):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x
    def init_highway_pooler( self , pooler ):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward( self , hidden_states , attention_mask=None , head_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer ):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states , attention_mask , head_mask[i] , encoder_hidden_states , encoder_attention_mask )
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)
            highway_exit = self.highway[i](current_outputs )
            # logits, pooled_output
            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits )
                highway_exit = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output , i + 1 )
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A shortcut from an intermediate BertLayer output to the classification head."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 97 | 1 |
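Since the early-exit decision above hinges entirely on entropy(), a small illustrative sketch may help; it assumes only torch and the entropy function defined in the snippet, and the 0.5 threshold is an arbitrary example value:

import torch

# Peaked (confident) logits yield lower entropy than flat (uncertain) ones,
# which is exactly what the per-layer early-exit threshold relies on.
confident = torch.tensor([[10.0, 0.0, 0.0]])
uncertain = torch.tensor([[1.0, 1.0, 1.0]])
assert entropy(confident).item() < entropy(uncertain).item()

# A configured encoder would then exit at layer i once the highway entropy
# drops below early_exit_entropy[i], e.g.:
# encoder.set_early_exit_entropy(0.5)  # same illustrative threshold everywhere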
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """
    Args:
        predicted_image_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`):
            The predicted CLIP image embedding conditioned on the CLIP text embedding input.
    """

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    def forward(
        self,
        hidden_states,
        timestep,
        proj_embedding,
        encoder_hidden_states=None,
        attention_mask=None,
        return_dict=True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 349 |
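A minimal shape-level smoke test for the prior above — a sketch under the assumption that the diffusers-style building blocks imported at the top of the snippet are available; the tiny dimensions are arbitrary, and encoder_hidden_states must supply num_embeddings tokens so the positional embedding lines up:

import torch

model = PriorTransformer(
    num_attention_heads=2,
    attention_head_dim=4,
    num_layers=1,
    embedding_dim=8,
    num_embeddings=3,
    additional_embeddings=4,
)
out = model(
    hidden_states=torch.randn(1, 8),             # noised image embedding
    timestep=1,
    proj_embedding=torch.randn(1, 8),            # conditioning embedding
    encoder_hidden_states=torch.randn(1, 3, 8),  # num_embeddings text tokens
)
print(out.predicted_image_embedding.shape)  # expected: torch.Size([1, 8])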
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = """
import json
import os

import datasets


REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}


class __DummyDataset1__(datasets.GeneratorBasedBuilder):

    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                        ]
                    )
                ),
                "langs": datasets.Sequence(datasets.Value("string")),
                "spans": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download(URLS)
        return [
            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_path)
| 349 | 1 |
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph contains a cycle."""
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 357 |
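Usage sketch for the cycle check above (assuming the check_cycle name from the reconstruction):

# A 2-node loop is reported as a cycle; a DAG is not.
assert check_cycle({0: [1], 1: [0]}) is True
assert check_cycle({0: [1], 1: []}) is False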
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
| 269 | 0 |
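A minimal usage sketch for the template above, assuming the datasets-style TaskTemplate API shown in the snippet:

template = LanguageModeling(text_column="content")
assert template.column_mapping == {"content": "text"}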
"""simple docstring"""
UpperCAmelCase_ : Any = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCAmelCase_ : Union[str, Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCAmelCase_ : Tuple = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 91 |
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(lowercase_ , '''hidden_sizes'''))
self.parent.assertTrue(hasattr(lowercase_ , '''num_attention_heads'''))
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : str , lowercase_ : Union[str, Any] , lowercase_ : List[Any]=13 , lowercase_ : Dict=64 , lowercase_ : Dict=3 , lowercase_ : Optional[Any]=3 , lowercase_ : List[Any]=2 , lowercase_ : Any=1 , lowercase_ : List[Any]=16 , lowercase_ : int=[128, 256, 384] , lowercase_ : str=[4, 6, 8] , lowercase_ : Optional[Any]=[2, 3, 4] , lowercase_ : Union[str, Any]=[16, 16, 16] , lowercase_ : Optional[Any]=0 , lowercase_ : Optional[int]=[2, 2, 2] , lowercase_ : Any=[2, 2, 2] , lowercase_ : List[str]=0.02 , lowercase_ : Any=True , lowercase_ : Union[str, Any]=True , lowercase_ : Optional[int]=2 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = parent
SCREAMING_SNAKE_CASE_ : Any = batch_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_size
SCREAMING_SNAKE_CASE_ : int = num_channels
SCREAMING_SNAKE_CASE_ : List[Any] = kernel_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = stride
SCREAMING_SNAKE_CASE_ : List[str] = padding
SCREAMING_SNAKE_CASE_ : int = hidden_sizes
SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_ : int = depths
SCREAMING_SNAKE_CASE_ : Optional[Any] = key_dim
SCREAMING_SNAKE_CASE_ : Optional[Any] = drop_path_rate
SCREAMING_SNAKE_CASE_ : Tuple = patch_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = attention_ratio
SCREAMING_SNAKE_CASE_ : str = mlp_ratio
SCREAMING_SNAKE_CASE_ : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE_ : List[Any] = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
SCREAMING_SNAKE_CASE_ : Any = is_training
SCREAMING_SNAKE_CASE_ : Tuple = use_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_labels
SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_ : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size] , self.num_labels)
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : int , lowercase_ : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = LevitModel(config=lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(lowercase_)
SCREAMING_SNAKE_CASE_ : Any = (self.image_size, self.image_size)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = image_size[0], image_size[1]
for _ in range(4):
SCREAMING_SNAKE_CASE_ : List[Any] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
SCREAMING_SNAKE_CASE_ : Dict = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]) , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = self.num_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LevitForImageClassification(lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = config_and_inputs
SCREAMING_SNAKE_CASE_ : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
__UpperCamelCase = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LevitModelTester(self)
SCREAMING_SNAKE_CASE_ : List[Any] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
return
@unittest.skip(reason='''Levit does not use inputs_embeds''')
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''')
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not output attentions''')
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Any = model_class(lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : Dict = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
def check_hidden_states_output(lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : str):
SCREAMING_SNAKE_CASE_ : str = model_class(lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Tuple = model(**self._prepare_for_class(lowercase_ , lowercase_))
SCREAMING_SNAKE_CASE_ : str = outputs.hidden_states
SCREAMING_SNAKE_CASE_ : Optional[int] = len(self.model_tester.depths) + 1
self.assertEqual(len(lowercase_) , lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = (self.model_tester.image_size, self.model_tester.image_size)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_size[0], image_size[1]
for _ in range(4):
SCREAMING_SNAKE_CASE_ : Optional[Any] = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1)
SCREAMING_SNAKE_CASE_ : Optional[int] = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Optional[int] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ : Tuple = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Tuple=False):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowercase_)
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_class(lowercase_)
model.to(lowercase_)
model.train()
SCREAMING_SNAKE_CASE_ : Optional[Any] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = model(**lowercase_).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : Optional[int] = True
for model_class in self.all_model_classes:
if model_class in get_values(lowercase_) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE_ : List[str] = model_class(lowercase_)
model.gradient_checkpointing_enable()
model.to(lowercase_)
model.train()
SCREAMING_SNAKE_CASE_ : Dict = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = model(**lowercase_).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[Any] = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowercase_),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}'):
SCREAMING_SNAKE_CASE_ : Optional[int] = problem_type['''title''']
SCREAMING_SNAKE_CASE_ : Optional[int] = problem_type['''num_labels''']
SCREAMING_SNAKE_CASE_ : str = model_class(lowercase_)
model.to(lowercase_)
model.train()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE_ : str = inputs['''labels'''].unsqueeze(1).repeat(1 , problem_type['''num_labels'''])
SCREAMING_SNAKE_CASE_ : Any = inputs['''labels'''].to(problem_type['''dtype'''])
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowercase_) as warning_list:
SCREAMING_SNAKE_CASE_ : int = model(**lowercase_).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message):
raise ValueError(
F'Something is going wrong in the regression problem: intercepted {w.message}')
loss.backward()
@slow
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Optional[Any] = LevitModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
def _A () -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = self.default_image_processor
SCREAMING_SNAKE_CASE_ : str = prepare_img()
SCREAMING_SNAKE_CASE_ : List[Any] = image_processor(images=lowercase_ , return_tensors='''pt''').to(lowercase_)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Any = model(**lowercase_)
# verify the logits
SCREAMING_SNAKE_CASE_ : Tuple = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([1.04_48, -0.37_45, -1.83_17]).to(lowercase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4))
| 91 | 1 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Class to contain the entire pipeline for the SHA1 hashing algorithm."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotate the 32-bit integer n by b bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Pad the data so its length in bits is a multiple of 512."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """Split the padded data into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expand a 64-byte block into 80 32-bit words."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """Run the 80-round compression for each block and return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 363 |
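A worked check of the padding arithmetic in padding() — a minimal sketch using only the class above:

# For b"Test String" (11 bytes): 1 byte 0x80 + (63 - (11 + 8) % 64) = 44 zero
# bytes + 8 length bytes gives 11 + 53 = 64 bytes, a whole 512-bit block.
padded = SHA1Hash(b"Test String").padding()
assert len(padded) % 64 == 0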
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area under the curve with the trapezoidal rule."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
| 225 | 0 |
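A quick convergence check — a minimal sketch; 1/3 is the exact integral of x^2 on [0, 1], so the estimate should land close to it:

approx = trapezoidal_area(lambda x: x * x, 0, 1, 1000)
assert abs(approx - 1 / 3) < 1e-3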
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 300 |
def heaps(arr: list) -> list:
    """Return all permutations of arr using Heap's iterative algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
| 300 | 1 |
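Usage sketch: Heap's algorithm should produce exactly n! distinct permutations, which gives a cheap correctness check:

from math import factorial

perms = heaps([1, 2, 3])
assert len(perms) == factorial(3)
assert len(set(perms)) == factorial(3)  # all distinct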
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 353 |
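A short usage sketch exercising the doubly linked list above (the names follow the reconstruction):

ll = LinkedList()
for value in (1, 2, 3):
    ll.insert(value)
assert str(ll) == "1 2 3"
ll.delete_value(2)
assert 2 not in ll
assert str(ll) == "1 3"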
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self):
        super().__init__(None, None)

    def __bool__(self):
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing and linear probing."""

    def __init__(self, initial_block_size=8, capacity_factor=0.75):
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key):
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind):
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind, key, val):
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self):
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self):
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size):
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self):
        self._resize(len(self._buckets) * 2)

    def _size_down(self):
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key):
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key, val):
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key, val):
        if self._is_full():
            self._size_up()

        self._add_item(key, val)

    def __delitem__(self, key):
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key):
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self):
        return self._len

    def __iter__(self):
        yield from (item.key for item in self._buckets if item)

    def __repr__(self):
        val_string = " ,".join(
            f"{item.key}: {item.val}" for item in self._buckets if item
        )
        return f"HashMap({val_string})"
| 122 | 0 |
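Usage sketch exercising insert, lookup, delete and the automatic resizing path of the map above:

hm = HashMap(initial_block_size=4)
for i in range(20):
    hm[f"key{i}"] = i  # triggers several _size_up() calls along the way
assert hm["key7"] == 7 and len(hm) == 20
del hm["key7"]
assert "key7" not in hm and len(hm) == 19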
def topological_sort(graph):
    """Kahn's algorithm: print a topological ordering, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 83 |
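The cnt != len(graph) guard is what catches cycles: vertices on a cycle never reach indegree zero, so they are never dequeued. A minimal demonstration:

topological_sort({0: [1], 1: [2], 2: [0]})  # prints "Cycle exists"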
import os


def solution():
    """Sum the numbers in num.txt and return the first ten digits of the total."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
| 34 | 0 |
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
| 370 |

from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by majority vote among its k nearest training points."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 258 | 0 |
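A rough held-out accuracy check building on the train/test split above — a sketch, not part of the original script:

correct = sum(
    classifier(X_train, y_train, classes, x) == classes[label]
    for x, label in zip(X_test, y_test)
)
print(f"test accuracy: {correct / len(y_test):.2f}")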
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__lowerCamelCase : int = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__lowerCamelCase : str = logging.getLogger()
def A_ ( ) -> List[str]:
UpperCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("-f" )
UpperCamelCase : Union[str, Any] = parser.parse_args()
return args.f
def A_ ( _lowerCAmelCase , _lowerCAmelCase="eval" ) -> Optional[int]:
UpperCamelCase : Optional[Any] = os.path.join(_lowerCAmelCase , F"""{split}_results.json""" )
if os.path.exists(_lowerCAmelCase ):
with open(_lowerCAmelCase , "r" ) as f:
return json.load(_lowerCAmelCase )
raise ValueError(F"""can't find {path}""" )
__lowerCamelCase : Any = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A__ ( __snake_case ):
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : str = self.get_auto_remove_tmp_dir()
UpperCamelCase : Optional[Any] = F"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(A_ , "argv" , A_ ):
run_flax_glue.main()
UpperCamelCase : int = get_results(A_ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
UpperCamelCase : Optional[Any] = F"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(A_ , "argv" , A_ ):
run_clm_flax.main()
UpperCamelCase : Optional[Any] = get_results(A_ )
self.assertLess(result["eval_perplexity"] , 100 )
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
UpperCamelCase : Optional[Any] = F"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(sys, "argv", testargs):
run_summarization_flax.main()
result = get_results(tmp_dir, split="test")
self.assertGreaterEqual(result["test_rouge1"] , 10 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def test_run_mlm(self):
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(sys, "argv", testargs):
run_mlm_flax.main()
result = get_results(tmp_dir)
self.assertLess(result["eval_perplexity"], 42)
@slow
def test_run_t5_mlm(self):
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(sys, "argv", testargs):
run_t5_mlm_flax.main()
result = get_results(tmp_dir)
self.assertGreaterEqual(result["eval_accuracy"], 0.42)
@slow
def test_run_ner(self):
# with so little data, distributed training needs more epochs to converge
epochs = 7 if get_gpu_count() > 1 else 2
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(sys, "argv", testargs):
run_flax_ner.main()
result = get_results(tmp_dir)
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def test_run_qa(self):
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(sys, "argv", testargs):
run_qa.main()
result = get_results(tmp_dir)
self.assertGreaterEqual(result["eval_f1"] , 30 )
self.assertGreaterEqual(result["eval_exact"] , 30 )
| 52 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
__lowerCamelCase : str = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class RobertaConfig(PretrainedConfig):
model_type = "roberta"
def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
else:
dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 52 | 1 |
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '''
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric("mean_iou")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
'''
_CITATION = '''\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}'''
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
"""Compute per-class intersection and union areas for one prediction/label pair."""
if label_map is not None:
for old_id, new_id in label_map.items():
label[label == old_id] = new_id
# turn into Numpy arrays
pred_label = np.array(pred_label)
label = np.array(label)
if reduce_labels:
label[label == 0] = 255
label = label - 1
label[label == 254] = 255
mask = np.not_equal(label, ignore_index)
pred_label = pred_label[mask]
label = np.array(label)[mask]
intersect = pred_label[pred_label == label]
area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
area_union = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
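# From the four returned arrays, per-class IoU is area_intersect / area_union and
# per-class accuracy is area_intersect / area_label; mean_iou below averages these.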
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
"""Accumulate intersection/union areas over a whole batch of segmentation maps."""
total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
total_area_union = np.zeros((num_labels,), dtype=np.float64)
total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
total_area_label = np.zeros((num_labels,), dtype=np.float64)
for result, gt_seg_map in zip(results, gt_seg_maps):
area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels)
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
"""Compute mean IoU, mean accuracy, overall accuracy and per-category scores."""
total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels)
# compute metrics
metrics = {}
all_acc = total_area_intersect.sum() / total_area_label.sum()
iou = total_area_intersect / total_area_union
acc = total_area_intersect / total_area_label
metrics["mean_iou"] = np.nanmean(iou)
metrics["mean_accuracy"] = np.nanmean(acc)
metrics["overall_accuracy"] = all_acc
metrics["per_category_iou"] = iou
metrics["per_category_accuracy"] = acc
if nan_to_num is not None:
metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
return metrics
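# Note: a class absent from both prediction and ground truth produces a 0/0 = NaN
# per-category score, which np.nanmean skips unless nan_to_num replaces it first.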
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
} ) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
iou_result = mean_iou(
results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index, nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels)
return iou_result
| 211 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path, pytorch_dump_folder_path):
"""Convert the original Bort checkpoint (based on MXNET and Gluonnlp) to the HF BERT structure."""
bort_4_8_768_1024_hparams = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 1_024,
'''hidden_size''': 768,
'''max_length''': 512,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 1_024,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1e-5,
'''token_type_vocab_size''': 2,
}
predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
encoder = BERTEncoder(
attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=False , output_all_encodings=False , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , None ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
vocab_name = '''openwebtext_ccnews_stories_books_cased'''
# Specify download folder to Gluonnlp's vocab
gluon_cache_dir = os.path.join(get_home_dir() , '''models''' )
bort_vocab = _load_vocab(vocab_name , None , gluon_cache_dir , cls=Vocab )
original_bort = nlp.model.BERTModel(
encoder , len(bort_vocab ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=False , use_token_type_embed=False , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=False , use_decoder=False , )
original_bort.load_parameters(bort_checkpoint_path , cast_dtype=True , ignore_extra=True )
params = original_bort._collect_params_with_prefix()
# Build our config 🤗
hf_bort_config_json = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
'''vocab_size''': len(bort_vocab ),
}
hf_bort_config = BertConfig.from_dict(hf_bort_config_json )
hf_bort_model = BertForMaskedLM(hf_bort_config )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(mx_array ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(hf_param , gluon_param ):
shape_hf = hf_param.shape
gluon_param = to_torch(params[gluon_param] )
shape_gluon = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
return gluon_param
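# check_and_map_params is used below both to shape-check a Gluon parameter against
# its HF counterpart and to return it as a torch tensor ready for assignment.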
hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
self_attn: BertSelfAttention = layer.attention.self
self_attn.key.bias.data = check_and_map_params(
self_attn.key.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' )
self_attn.key.weight.data = check_and_map_params(
self_attn.key.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' )
self_attn.query.bias.data = check_and_map_params(
self_attn.query.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' )
self_attn.query.weight.data = check_and_map_params(
self_attn.query.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' )
self_attn.value.bias.data = check_and_map_params(
self_attn.value.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' )
self_attn.value.weight.data = check_and_map_params(
self_attn.value.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' )
# self attention output
self_output: BertSelfOutput = layer.attention.output
self_output.dense.bias = check_and_map_params(
self_output.dense.bias , f'encoder.transformer_cells.{i}.proj.bias' )
self_output.dense.weight = check_and_map_params(
self_output.dense.weight , f'encoder.transformer_cells.{i}.proj.weight' )
self_output.LayerNorm.bias = check_and_map_params(
self_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.layer_norm.beta' )
self_output.LayerNorm.weight = check_and_map_params(
self_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.layer_norm.gamma' )
# intermediate
intermediate: BertIntermediate = layer.intermediate
intermediate.dense.bias = check_and_map_params(
intermediate.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_1.bias' )
intermediate.dense.weight = check_and_map_params(
intermediate.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_1.weight' )
# output
bert_output: BertOutput = layer.output
bert_output.dense.bias = check_and_map_params(
bert_output.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_2.bias' )
bert_output.dense.weight = check_and_map_params(
bert_output.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_2.weight' )
bert_output.LayerNorm.bias = check_and_map_params(
bert_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.ffn.layer_norm.beta' )
bert_output.LayerNorm.weight = check_and_map_params(
bert_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' )
# Save space and energy 🎄
hf_bort_model.half()
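# Casting to fp16 above is presumably why the comparison further down uses a
# relatively loose tolerance (atol=1e-3) rather than exact equality.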
# Compare output of both models
tokenizer = RobertaTokenizer.from_pretrained('''roberta-base''' )
input_ids = tokenizer.encode_plus(SAMPLE_TEXT )['''input_ids''']
# Get gluon output
gluon_input_ids = mx.nd.array([input_ids] )
output_gluon = original_bort(inputs=gluon_input_ids , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(pytorch_dump_folder_path )
hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path )
hf_bort_model.eval()
input_ids = tokenizer.encode_plus(SAMPLE_TEXT , return_tensors='''pt''' )
output_hf = hf_bort_model(**input_ids )[0]
gluon_layer = output_gluon[0].asnumpy()
hf_layer = output_hf[0].detach().numpy()
max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
success = np.allclose(gluon_layer , hf_layer , atol=1e-3 )
if success:
print('''✔️ Both models output the same tensors''' )
else:
print('''❌ The models do **NOT** output the same tensors''' )
print('''Absolute difference is:''' , max_absolute_diff )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 211 | 1 |
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def rename_keys(s_dict):
"""Rename flattened T5X parameter keys to the SwitchTransformers naming scheme."""
keys = list(s_dict.keys())
for key in keys:
layer_to_block = R".*/layers_(\d+)"
new_key = key
if re.match(layer_to_block, key):
new_key = re.sub(R"layers_(\d+)", R"block/\1/layer", new_key)
encoder_decoder_regex = R"(encoder|decoder)\/"
if re.match(encoder_decoder_regex, key):
groups = re.match(encoder_decoder_regex, new_key).groups()
if groups[0] == "encoder":
new_key = re.sub(R"/mlp/", R"/1/mlp/", new_key)
new_key = re.sub(R"/pre_mlp_layer_norm/", R"/1/layer_norm/", new_key)
elif groups[0] == "decoder":
new_key = re.sub(R"/mlp/", R"/2/mlp/", new_key)
new_key = re.sub(R"/pre_mlp_layer_norm/", R"/2/layer_norm/", new_key)
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
new_key = new_key.replace(old_key, temp_key)
print(f'{key} -> {new_key}' )
s_dict[new_key] = s_dict.pop(key)
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys()):
if "expert" in key:
# the stacked expert tensor is split into one key per expert
# (target key format reconstructed from the garbled source)
num_experts = s_dict[key].shape[0]
expert_weights = s_dict[key]
for idx in range(num_experts):
s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
s_dict.pop(key)
return s_dict
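# Illustrative example of the renaming above (hypothetical key):
# "encoder/layers_0/attention/key/kernel" -> "encoder/block/0/layer/0/SelfAttention/k/kernel"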
GIN_TO_CONFIG_MAPPING = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def convert_gin_to_config(gin_file, num_experts):
"""Build a SwitchTransformersConfig from the hyperparameters found in a gin file."""
import regex as re
with open(gin_file, "r") as f:
raw_gin = f.read()
regex_match = re.findall(R"(.*) = ([0-9.]*)", raw_gin)
args = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)
activation = re.findall(R"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])
args["num_experts"] = num_experts
config = SwitchTransformersConfig(**args)
return config
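# Example: a gin line such as "NUM_HEADS = 12" is picked up by the first regex above
# and stored as the config kwarg num_heads=12 via GIN_TO_CONFIG_MAPPING.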
def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8):
print(f'Loading flax weights from : {flax_checkpoint_path}' )
flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)
if gin_file is not None:
config = convert_gin_to_config(gin_file, num_experts)
else:
config = SwitchTransformersConfig.from_pretrained(config_file)
pt_model = SwitchTransformersForConditionalGeneration(config)
flax_params = flax_params["target"]
flax_params = flatten_dict(flax_params, sep="/")
flax_params = rename_keys(flax_params)
flax_params = unflatten_dict(flax_params, sep="/")
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(pt_model, flax_params)
print(f'Save PyTorch model to {pytorch_dump_path}' )
pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 91 |
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.scope = scope
self.range_bbox = range_bbox
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
# Ensure that bbox is legal (x0 <= x1 and y0 <= y1) by swapping out-of-order coordinates
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
t = bbox[i, j, 3]
bbox[i, j, 3] = bbox[i, j, 1]
bbox[i, j, 1] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
t = bbox[i, j, 2]
bbox[i, j, 2] = bbox[i, j, 0]
bbox[i, j, 0] = t
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
config = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
model = LiltModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
result = model(input_ids, bbox=bbox)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
config.num_labels = self.num_labels
model = LiltForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
model = LiltForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
fx_compatible = False
test_pruning = False
def is_pipeline_test_to_skip(
self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
return True
def setUp(self):
self.model_tester = LiltModelTester(self)
self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_various_embeddings(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = LiltModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
def test_inference_no_head(self):
model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
input_ids = torch.tensor([[1, 2]], device=torch_device)
bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
# forward pass
with torch.no_grad():
outputs = model(input_ids=input_ids, bbox=bbox)
expected_shape = torch.Size([1, 2, 768])
expected_slice = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device, )
self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 91 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
# Construct model
if openai_config_file == "":
config = OpenAIGPTConfig()
else:
config = OpenAIGPTConfig.from_json_file(openai_config_file)
model = OpenAIGPTModel(config)
# Load weights from numpy
load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)
# Save pytorch-model
pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F"Save PyTorch model to {pytorch_weights_dump_path}")
torch.save(model.state_dict(), pytorch_weights_dump_path)
print(F"Save configuration file to {pytorch_config_dump_path}")
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
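# Hypothetical invocation (script name and paths are placeholders):
#   python convert_openai_checkpoint.py \
#       --openai_checkpoint_folder_path ./openai-gpt --pytorch_dump_folder_path ./pt_model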
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--openai_checkpoint_folder_path',
default=None,
type=str,
required=True,
help='Path to the TensorFlow checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--openai_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 357 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
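# Worked example: normalize_box([10, 20, 30, 40], width=200, height=100)
# -> [50, 200, 150, 400], i.e. pixel coordinates rescaled to a 0-1000 grid.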
def apply_tesseract(image, lang, tesseract_config=None):
tesseract_config = tesseract_config if tesseract_config is not None else ""
# apply OCR
pil_image = to_pil_image(image)
image_width, image_height = pil_image.size
data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
# filter empty words and corresponding coordinates
irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
actual_boxes = []
for x, y, w, h in zip(left, top, width, height):
actual_box = [x, y, x + w, y + h]
actual_boxes.append(actual_box)
# finally, normalize the bounding boxes
normalized_boxes = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(box, image_width, image_height))
assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
return words, normalized_boxes
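# The boxes returned here are already on the 0-1000 scale that LayoutLM-style
# models expect for their bounding-box embeddings.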
class LayoutLMv2ImageProcessor(BaseImageProcessor):
model_input_names = ["pixel_values"]
def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, apply_ocr=True, ocr_lang=None, tesseract_config="", **kwargs) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"height": 224, "width": 224}
size = get_size_dict(size)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.apply_ocr = apply_ocr
self.ocr_lang = ocr_lang
self.tesseract_config = tesseract_config
def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
size = get_size_dict(size)
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
output_size = (size["height"], size["width"])
# resolves to the module-level resize() imported from image_transforms
return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
def preprocess(self, images, do_resize=None, size=None, resample=None, apply_ocr=None, ocr_lang=None, tesseract_config=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> BatchFeature:
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size)
resample = resample if resample is not None else self.resample
apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
images = make_list_of_images(images)
if not valid_images(images):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True.")
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if apply_ocr:
requires_backends(self, "pytesseract")
words_batch = []
boxes_batch = []
for image in images:
words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
words_batch.append(words)
boxes_batch.append(boxes)
if do_resize:
images = [self.resize(image=image, size=size, resample=resample) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
images = [flip_channel_order(image) for image in images]
images = [to_channel_dimension_format(image, data_format) for image in images]
data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
if apply_ocr:
data["words"] = words_batch
data["boxes"] = boxes_batch
return data
| 183 | 0 |
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.embedding_size = embedding_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = MegatronBertModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = MegatronBertForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = MegatronBertForCausalLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = MegatronBertForNextSentencePrediction(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = MegatronBertForPreTraining(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = MegatronBertForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
__A : Tuple = self.num_labels
__A : Union[str, Any] = MegatronBertForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
__A : Tuple = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
__A : int = self.num_labels
__A : List[str] = MegatronBertForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
__A : Union[str, Any] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
__A : Union[str, Any] = self.num_choices
__A : int = MegatronBertForMultipleChoice(config=lowercase_ )
model.to(lowercase_ )
model.eval()
__A : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__A : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__A : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__A : Dict = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : str = self.prepare_config_and_inputs()
(
(
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) ,
) : List[str] = config_and_inputs
__A : Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __snake_case ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    # `get_values` and `MODEL_FOR_PRETRAINING_MAPPING` are assumed imported at the
    # top of this file (as in the original transformers test).
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict['''next_sentence_label'''] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        '''simple docstring'''
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_megatron_bert_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)
    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)
    def test_for_next_sequence_prediction(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)
    def test_for_pretraining(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)
    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    '''simple docstring'''
    # `torch_device` is assumed imported from transformers.testing_utils at the
    # top of this file (as in the original test).
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('''Model is not available.''' )
    def test_inference_no_head(self):
        '''simple docstring'''
        directory = '''nvidia/megatron-bert-uncased-345m'''
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ['''MYDIR'''], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = '''ii={} jj={} a={} b={}'''.format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
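# --- illustrative sketch (not part of the test file above) ---
# A hedged, minimal example of the shape contract that
# `create_and_check_megatron_bert_model` verifies: a tiny randomly initialised
# MegatronBertModel maps (batch, seq_len) token ids to
# (batch, seq_len, hidden_size) hidden states. All sizes here are assumptions
# chosen only to keep the model small.
import torch
from transformers import MegatronBertConfig, MegatronBertModel

tiny_config = MegatronBertConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
)
tiny_model = MegatronBertModel(tiny_config).eval()
token_ids = torch.randint(0, 99, (2, 7))
assert tiny_model(token_ids).last_hidden_state.shape == (2, 7, 32)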
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A ( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (('num_inference_steps', 25),)
    def get_scheduler_config(self, **kwargs):
        '''simple docstring'''
        config = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'prediction_type': 'epsilon',
'thresholding': False,
'sample_max_value': 1.0,
'algorithm_type': 'dpmsolver++',
'solver_type': 'midpoint',
'lambda_min_clipped': -float('inf' ),
'variance_type': None,
}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self : List[str] )-> List[Any]:
'''simple docstring'''
pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        '''simple docstring'''
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 1_0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
    def test_full_uneven_loop(self):
        '''simple docstring'''
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 5_0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_574) < 1E-3
    def test_timesteps(self):
        '''simple docstring'''
        for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_switch(self):
        '''simple docstring'''
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_791) < 1E-3
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_791) < 1E-3
    def test_thresholding(self):
        '''simple docstring'''
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, algorithm_type='dpmsolver++', solver_order=order, solver_type=solver_type, )
    def test_prediction_type(self):
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        '''simple docstring'''
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        sample = self.full_loop(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        '''simple docstring'''
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_lambda_min_clipped(self):
        '''simple docstring'''
        self.check_over_configs(lambda_min_clipped=-float('inf'))
        self.check_over_configs(lambda_min_clipped=-5.1)
    def test_variance_type(self):
        '''simple docstring'''
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type='learned_range')
    def test_inference_steps(self):
        '''simple docstring'''
        for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        '''simple docstring'''
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_791) < 1E-3
    def test_full_loop_with_karras(self):
        '''simple docstring'''
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2_248) < 1E-3
    def test_full_loop_with_v_prediction(self):
        '''simple docstring'''
        sample = self.full_loop(prediction_type='v_prediction')
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1_453) < 1E-3
    def test_full_loop_with_karras_and_v_prediction(self):
        '''simple docstring'''
        sample = self.full_loop(prediction_type='v_prediction', use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0_649) < 1E-3
    def test_fp16_support(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 1_0
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        assert sample.dtype == torch.float16
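# --- illustrative sketch (not part of the test file above) ---
# The `test_switch` test relies on schedulers sharing a config format; a hedged
# example of the same pattern in user code (the checkpoint id is an assumption,
# any diffusers pipeline works; commented out because it downloads weights):
#
#   from diffusers import DiffusionPipeline, DPMSolverSinglestepScheduler
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config)
#   image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]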
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    '''simple docstring'''
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={'help': 'The csv file to plot.'}, )
    plot_along_batch: bool = field(
        default=False, metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'}, )
    is_time: bool = field(
        default=False, metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'}, )
    no_log_scale: bool = field(
        default=False, metadata={'help': 'Disable logarithmic scale when plotting'}, )
    is_train: bool = field(
        default=False, metadata={
            'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
        }, )
    figure_png_file: Optional[str] = field(
        default=None, metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'}, )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def can_convert_to_int(string):
    '''simple docstring'''
    try:
        int(string)
        return True
    except ValueError:
        return False
def can_convert_to_float(string):
    '''simple docstring'''
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})
        with open(self.args.csv_file, newline='') as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row['model']
                self.result_dict[model_name]["bsz"].append(int(row['batch_size']))
                self.result_dict[model_name]["seq_len"].append(int(row['sequence_length']))
                if can_convert_to_int(row['result']):
                    # value is not None
                    self.result_dict[model_name]["result"][(int(row['batch_size']), int(row['sequence_length']))] = int(
                        row['result'])
                elif can_convert_to_float(row['result']):
                    # value is not None
                    self.result_dict[model_name]["result"][(int(row['batch_size']), int(row['sequence_length']))] = float(
                        row['result'])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = 'Time usage' if self.args.is_time else 'Memory usage'
        title_str = title_str + ' for training' if self.args.is_train else title_str + ' for inference'
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale('log')
            ax.set_yscale('log')
        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())
        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]['bsz']))
            sequence_lengths = sorted(set(self.result_dict[model_name]['seq_len']))
            results = self.result_dict[model_name]['result']
            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results], dtype=int, )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results], dtype=np.float32, )
                (x_axis_label, inner_loop_label) = (
                    ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
                )
                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''')
                plt.plot(x_axis_array, y_axis_array, '--')
                title_str += f''' {label_model_name} vs.'''
        title_str = title_str[:-4]
        ylabel_str = 'Time in s' if self.args.is_time else 'Memory in MB'
        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(ylabel_str)
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    '''simple docstring'''
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
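# --- illustrative sketch (not part of the script above) ---
# Plot.__init__ reads the columns model, batch_size, sequence_length and
# result. A hedged example of writing a compatible csv (the values are made up):
import csv

rows = [
    {"model": "bert-base", "batch_size": 8, "sequence_length": 128, "result": 1432},
    {"model": "bert-base", "batch_size": 8, "sequence_length": 512, "result": 2890},
]
with open("benchmark.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=["model", "batch_size", "sequence_length", "result"])
    writer.writeheader()
    writer.writerows(rows)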
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_UpperCamelCase: Optional[int] = logging.get_logger(__name__)
_UpperCamelCase: Union[str, Any] = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig ( PretrainedConfig ):
    model_type = 'gpt_neo'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.attention_layers)` == `config.num_layers` '
                f'''but is `len(config.attention_layers) = {len(self.attention_layers)}`, '''
                f'''`config.num_layers = {self.num_layers}`. '''
                '`config.attention_layers` is prepared using `config.attention_types`. '
                'Please verify the value of `config.attention_types` argument.')
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def custom_unfold(input, dimension, size, step):
    '''Custom torch.Tensor.unfold implementation to enable the export to ONNX.'''
    import torch
    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode='floor') + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    '''simple docstring'''
    import torch
    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode='floor')
class GPTNeoOnnxConfig ( OnnxConfigWithPast ):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs
    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
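# --- illustrative sketch (not part of the configuration file above) ---
# `custom_unfold` mirrors torch.Tensor.unfold so tracing for ONNX export can
# avoid the built-in op; a hedged equivalence check (names local to this sketch):
import torch

x = torch.arange(10.0)
# sliding windows of size 4 with stride 2 along dim 0
assert torch.equal(custom_unfold(x, 0, 4, 2), x.unfold(0, 4, 2))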
"""simple docstring"""
def solution(limit: int = 2_8_1_2_3) -> int:
    '''simple docstring'''
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res
if __name__ == "__main__":
print(solution())
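# --- illustrative sketch ---
# The sieve above accumulates proper-divisor sums in O(limit log limit);
# 12 is the smallest abundant number (1+2+3+4+6 = 16 > 12), so 24 = 12 + 12 is
# the smallest sum of two abundant numbers and every n <= 23 contributes:
assert solution(limit=23) == sum(range(1, 24))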
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : Any =logging.get_logger(__name__)
lowerCAmelCase__ : str ={
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UpperCAmelCase_ ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''unispeech-sat'''
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.0_2,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.0_5,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1_500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
                f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        '''simple docstring'''
        return functools.reduce(operator.mul, self.conv_stride, 1)
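# --- illustrative sketch (not part of the configuration file above) ---
# With the default conv_stride=(5, 2, 2, 2, 2, 2, 2), the property above
# evaluates to 5 * 2**6 = 320: one encoder frame per 320 raw audio samples
# (20 ms at a 16 kHz sampling rate).
import functools
import operator

default_strides = (5, 2, 2, 2, 2, 2, 2)
assert functools.reduce(operator.mul, default_strides, 1) == 320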
def miller_rabin(n, allow_probable=False):
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3170_4406_4679_8873_8596_1981 and not allow_probable:
raise ValueError(
'Warning: upper bound of deterministic test is exceeded. '
'Pass allow_probable=True to allow probabilistic test. '
'A return value of True indicates a probable prime.')
# array bounds provided by analysis
    bounds = [
2047,
137_3653,
2532_6001,
32_1503_1751,
2_1523_0289_8747,
3_4747_4966_0383,
341_5500_7172_8321,
1,
382_5123_0565_4641_3051,
1,
1,
3186_6585_7834_0311_5116_7461,
3_3170_4406_4679_8873_8596_1981,
]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def test_miller_rabin():
assert not miller_rabin(561)
assert miller_rabin(563)
# 2047
assert not miller_rabin(83_8201)
assert miller_rabin(83_8207)
# 1_373_653
assert not miller_rabin(1731_6001)
assert miller_rabin(1731_6017)
# 25_326_001
assert not miller_rabin(30_7838_6641)
assert miller_rabin(30_7838_6653)
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801)
assert miller_rabin(1_7130_4557_4819)
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307)
assert miller_rabin(2_7797_9972_8327)
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441)
assert miller_rabin(113_8500_2390_9527)
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351)
assert miller_rabin(127_5041_0188_4880_4391)
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867)
assert miller_rabin(796_6646_4458_5077_8779_1951)
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333)
assert miller_rabin(5528_4067_7446_6478_9766_0359)
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
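# --- illustrative sketch ---
# miller_rabin is deterministic for n below the last bound (~3.3e24) because it
# checks every witness prime up to the first bound exceeding n. Two extra spot
# checks: 2**31 - 1 is a Mersenne prime, while 2**31 + 1 is divisible by 3.
assert miller_rabin(2_147_483_647)
assert not miller_rabin(2_147_483_649)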
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

        def __init__(self, module, rank):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False), nn.Linear(rank, module.out_features, bias=False), )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
    model_name = '''bigscience/bloom-1b7'''
    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109_6595_5269_2574
    input_text = '''Hello my name is'''
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
    EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
    EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
    MAX_NEW_TOKENS = 10
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
# Models and tokenizer
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest ( Base4bitTest ):
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
super().setUp()
# Models and tokenizer
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto')
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a , device_map='auto')
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = self.model_abit.config
self.assertTrue(hasattr(a , 'quantization_config'))
SCREAMING_SNAKE_CASE = config.to_dict()
SCREAMING_SNAKE_CASE = config.to_diff_dict()
SCREAMING_SNAKE_CASE = config.to_json_string()
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
from bitsandbytes.nn import Paramsabit
SCREAMING_SNAKE_CASE = self.model_fpaa.get_memory_footprint()
SCREAMING_SNAKE_CASE = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE)
SCREAMING_SNAKE_CASE = get_some_linear_layer(self.model_abit)
self.assertTrue(linear.weight.__class__ == Paramsabit)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(a , torch.nn.Linear):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta)
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt')
SCREAMING_SNAKE_CASE = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10)
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=a) , self.EXPECTED_OUTPUTS)
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = BitsAndBytesConfig()
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=a , device_map='auto')
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt')
SCREAMING_SNAKE_CASE = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10)
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=a) , self.EXPECTED_OUTPUTS)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
with self.assertRaises(a), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = BitsAndBytesConfig()
with self.assertRaises(a):
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=a , load_in_abit=a , device_map='auto' , bnb_abit_quant_type='nf4' , )
def SCREAMING_SNAKE_CASE__ ( self) -> int:
with self.assertRaises(a):
# Tries with `str`
self.model_abit.to('cpu')
with self.assertRaises(a):
# Tries with a `dtype``
self.model_abit.to(torch.floataa)
with self.assertRaises(a):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0'))
with self.assertRaises(a):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(a):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt')
SCREAMING_SNAKE_CASE = self.model_fpaa.to(torch.floataa)
SCREAMING_SNAKE_CASE = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10)
# Check this does not throw an error
SCREAMING_SNAKE_CASE = self.model_fpaa.to('cpu')
# Check this does not throw an error
SCREAMING_SNAKE_CASE = self.model_fpaa.half()
# Check this does not throw an error
SCREAMING_SNAKE_CASE = self.model_fpaa.float()
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=a , device_map='auto')
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test ( unittest.TestCase ):
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls) -> Tuple:
SCREAMING_SNAKE_CASE = 't5-small'
SCREAMING_SNAKE_CASE = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(cls.model_name)
SCREAMING_SNAKE_CASE = 'Translate in German: Hello, my dog is cute'
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
from transformers import TaForConditionalGeneration
SCREAMING_SNAKE_CASE = TaForConditionalGeneration._keep_in_fpaa_modules
SCREAMING_SNAKE_CASE = None
# test with `t5-small`
SCREAMING_SNAKE_CASE = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=a , device_map='auto')
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt').to(0)
SCREAMING_SNAKE_CASE = model.generate(**a)
# test with `flan-t5-small`
SCREAMING_SNAKE_CASE = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=a , device_map='auto')
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt').to(0)
SCREAMING_SNAKE_CASE = model.generate(**a)
SCREAMING_SNAKE_CASE = modules
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
SCREAMING_SNAKE_CASE = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=a , device_map='auto')
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit))
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt').to(0)
SCREAMING_SNAKE_CASE = model.generate(**a)
# test with `flan-t5-small`
SCREAMING_SNAKE_CASE = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=a , device_map='auto')
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt').to(0)
SCREAMING_SNAKE_CASE = model.generate(**a)
class Classes4BitModelTest ( Base4bitTest ):
def SCREAMING_SNAKE_CASE__ ( self) -> str:
super().setUp()
# model_name
SCREAMING_SNAKE_CASE = 'bigscience/bloom-560m'
SCREAMING_SNAKE_CASE = 't5-small'
# Different types of model
SCREAMING_SNAKE_CASE = AutoModel.from_pretrained(self.model_name , load_in_abit=a , device_map='auto')
# Sequence classification model
SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=a , device_map='auto')
# CausalLM model
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a , device_map='auto')
# Seq2seq model
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=a , device_map='auto')
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit)
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest ( Base4bitTest ):
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
super().setUp()
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
SCREAMING_SNAKE_CASE = self.pipe(self.input_text)
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu ( Base4bitTest ):
def SCREAMING_SNAKE_CASE__ ( self) -> int:
super().setUp()
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=a , device_map='balanced')
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values()) , {0, 1})
# Check that inference pass works on the model
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors='pt')
# Second real batch
SCREAMING_SNAKE_CASE = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10)
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=a) , self.EXPECTED_OUTPUTS)
class Bnb4bitTestTraining ( Base4bitTest ):
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = 'facebook/opt-350m'
super().setUp()
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
if version.parse(importlib.metadata.version('bitsandbytes')) < version.parse('0.37.0'):
return
# Step 1: freeze all parameters
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a)
self.assertEqual(set(model.hf_device_map.values()) , {torch.cuda.current_device()})
for param in model.parameters():
SCREAMING_SNAKE_CASE = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
SCREAMING_SNAKE_CASE = param.data.to(torch.floataa)
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(a)):
SCREAMING_SNAKE_CASE = LoRALayer(module.q_proj , rank=16)
SCREAMING_SNAKE_CASE = LoRALayer(module.k_proj , rank=16)
SCREAMING_SNAKE_CASE = LoRALayer(module.v_proj , rank=16)
# Step 3: dummy batch
SCREAMING_SNAKE_CASE = self.tokenizer('Test batch ' , return_tensors='pt').to(0)
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
SCREAMING_SNAKE_CASE = model.forward(**a)
out.logits.norm().backward()
for module in model.modules():
if isinstance(a , a):
self.assertTrue(module.adapter[1].weight.grad is not None)
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
elif isinstance(a , nn.Embedding):
self.assertTrue(module.weight.grad is None)
class Bnb4bitGPT2Test ( Bnb4BitTest ):
    model_name = '''gpt2-xl'''
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191_8548_5415_2187
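# --- illustrative sketch (not part of the test file above) ---
# A hedged, minimal example of the 4-bit loading path these tests exercise
# (the checkpoint id is an assumption; any causal LM on the Hub works;
# commented out because it needs a GPU and downloads weights):
#
#   import torch
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#   quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
#   model = AutoModelForCausalLM.from_pretrained(
#       "bigscience/bloom-1b7", quantization_config=quant_config, device_map="auto"
#   )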
from collections.abc import Callable
class _A :
    def __init__(self, key: Callable | None = None) -> None:
        """simple docstring"""
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)
    def _parent(self, i: int) -> int | None:
        """simple docstring"""
        return int((i - 1) / 2) if i > 0 else None
    def _left(self, i: int) -> int | None:
        """simple docstring"""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None
    def _right(self, i: int) -> int | None:
        """simple docstring"""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None
    def _swap(self, i: int, j: int) -> None:
        """simple docstring"""
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]
    def _cmp(self, i: int, j: int) -> bool:
        """simple docstring"""
        return self.arr[i][1] < self.arr[j][1]
    def _get_valid_parent(self, i: int) -> int:
        """simple docstring"""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right
        return valid_parent
    def _heapify_up(self, index: int) -> None:
        """simple docstring"""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)
    def _heapify_down(self, index: int) -> None:
        """simple docstring"""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)
    def update_item(self, item: int, item_value: int) -> None:
        """simple docstring"""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)
    def delete_item(self, item: int) -> None:
        """simple docstring"""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)
    def insert_item(self, item: int, item_value: int) -> None:
        """simple docstring"""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)
    def get_top(self) -> tuple | None:
        """simple docstring"""
        return self.arr[0] if self.size else None
    def extract_top(self) -> tuple | None:
        """simple docstring"""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def snake_case( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
    doctest.testmod()
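# --- illustrative sketch ---
# Hedged usage of the heap above (class name `_A` kept as-is from this file).
# The heap is a max-heap on the stored key, so a negating key gives
# min-by-value behaviour, matching the original doctests:
min_heap = _A(key=lambda value: -value)
min_heap.insert_item(5, 34)
min_heap.insert_item(6, 31)
min_heap.insert_item(7, 37)
assert min_heap.get_top() == [6, -31]
assert min_heap.extract_top() == [6, -31]
assert min_heap.get_top() == [5, -34]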
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    '''simple docstring'''
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=num_images , aesthetic_weight=0.1 )
    os.makedirs(F"""{class_data_dir}/images""" , exist_ok=True )
    if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=num_images , aesthetic_weight=0.1 , )
    count = 0
    total = 0
    pbar = tqdm(desc='''downloading real regularization images''' , total=num_class_images )
    with open(F"""{class_data_dir}/caption.txt""" , '''w''' ) as fa, open(F"""{class_data_dir}/urls.txt""" , '''w''' ) as fb, open(
        F"""{class_data_dir}/images.txt""" , '''w''' ) as fc:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['''url'''] )
                if img.status_code == 2_00:
                    _ = Image.open(BytesIO(img.content ) )
                    with open(F"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f:
                        f.write(img.content )
                    fa.write(images['''caption'''] + '''\n''' )
                    fb.write(images['''url'''] + '''\n''' )
                    fc.write(F"""{class_data_dir}/images/{total}.jpg""" + '''\n''' )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser('''''' , add_help=False )
    parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=True , type=str )
    parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=True , type=str )
    parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=2_00 , type=int )
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
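# --- illustrative sketch ---
# Example invocation of the script above (the prompt and paths are assumptions):
#   python retrieve.py --class_prompt "photo of a dog" \
#       --class_data_dir ./real_reg/dog --num_class_images 200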
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig ( PretrainedConfig):
    model_type = "poolformer"
    def __init__(
        self,
        num_channels=3,
        patch_size=1_6,
        stride=1_6,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1E-5,
        initializer_range=0.02,
        **kwargs,
    ):
        """simple docstring"""
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs )
class PoolFormerOnnxConfig ( OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 2E-3
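# --- illustrative sketch (not part of the configuration file above) ---
# The default depths=[2, 2, 6, 2] define one PoolFormer stage each, giving the
# 12 blocks of the "s12" variant referenced in the checkpoint map above:
from transformers import PoolFormerConfig

assert sum(PoolFormerConfig().depths) == 12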
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    '''simple docstring'''
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    special_keys = ['''key_proj''', '''value_proj''', '''query_proj''']
    mapping = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
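
# Editorial example invocation (the script name and paths are placeholders):
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path ./prophetnet_large_old \
#       --pytorch_dump_folder_path ./prophetnet_large_converted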
| 175 | 0 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """Output of TransformerTemporalModel: the denoised hidden states."""

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """A Transformer model for video-like (frame-batched) data."""

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)
    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
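

# Editorial smoke test (not in the original file): the shapes and hyperparameters
# below are illustrative assumptions.
if __name__ == "__main__":
    model = TransformerTemporalModel(num_attention_heads=2, attention_head_dim=8, in_channels=32, num_layers=1)
    frames = torch.randn(2 * 8, 32, 8, 8)  # (batch * num_frames, channels, height, width)
    out = model(frames, num_frames=8).sample
    print(out.shape)  # torch.Size([16, 32, 8, 8])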
| 14 |
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n    predictions: list of predictions to score (as int64),\n    except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n    references: list of ground truth labels corresponding to the predictions (as int64),\n    except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n    "accuracy": Accuracy\n    "f1": F1 score\n    "precision": Precision@10\nExamples:\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\')  # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0, \'f1\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
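

# Editorial examples (not part of the original module), doctest-style:
#   >>> simple_accuracy(np.array([0, 1, 1, 0]), np.array([0, 1, 0, 0]))
#   0.75
#   >>> acc_and_f1(np.array([0, 1, 1, 0]), np.array([0, 1, 0, 0]))["f1"]
#   0.666...  (precision 0.5, recall 1.0 for the positive class)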
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]")
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64")
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32")),
"references": datasets.Value("int64")
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32")),
}) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
    def _compute(self, predictions, references):
        """simple docstring"""
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]") | 269 | 0 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data['data'])
y = np.array(data['target'])
classes = data['target_names']

X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
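

# Editorial example (the query point and k are illustrative):
#   classifier(X_train, y_train, classes, [6.0, 2.9, 4.5, 1.5], k=3)
#   -> one of 'setosa' / 'versicolor' / 'virginica'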
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4])) | 96 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
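    # Editorial example invocation (the script name and dump path are illustrative):
    #   python convert_unclip_txt2img_to_image_variation.py --dump_path ./unclip-image-variation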
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path) | 96 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {'shortest_edge': 18}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
"""simple docstring"""
__lowercase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowercase , 'image_mean' ) )
self.assertTrue(hasattr(__lowercase , 'image_std' ) )
self.assertTrue(hasattr(__lowercase , 'do_normalize' ) )
self.assertTrue(hasattr(__lowercase , 'do_resize' ) )
self.assertTrue(hasattr(__lowercase , 'do_center_crop' ) )
self.assertTrue(hasattr(__lowercase , 'size' ) )
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})
    def test_batch_feature(self):
"""simple docstring"""
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
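
# Editorial usage sketch (not part of the test file): the processor under test can
# also be exercised directly; LevitImageProcessor is the real transformers class.
#   from transformers import LevitImageProcessor
#   processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
#   pixel_values = processor(images=pil_image, return_tensors="pt").pixel_values  # pil_image: any PIL.Image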
| 141 |
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """A CLIPTokenizer that maps one placeholder token to several learned embedding tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
                ' `placeholder_token` that is not already in the tokenizer.'
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f'''_{i}'''
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f'''The tokenizer already has placeholder token {token} that can get confused with'''
                    f''' {placeholder_token}; keep placeholder tokens independent'''
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, ' '.join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
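

# Editorial usage sketch (token strings and model id are illustrative; running this
# downloads the pretrained tokenizer files):
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   ids = tokenizer.encode("a photo of <cat-toy>", vector_shuffle=True)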
| 141 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception('Matrices are not 2x2')
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception('Odd matrices are not supported!')

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print('\n'.join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            'Unable to multiply these matrices, please check the dimensions.\n'
            f"""Matrix A: {matrix1}\n"""
            f"""Matrix B: {matrix2}"""
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
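
    # Editorial check (matrices are illustrative). Note that for two square inputs the
    # function above returns [matrix1, matrix2] unchanged, a quirk kept from the source,
    # so this example uses non-square inputs:
    m1 = [[1, 2, 3], [4, 5, 6]]
    m2 = [[1, 0], [0, 1], [1, 1]]
    print(strassen(m1, m2))  # [[4, 5], [10, 11]]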
| 85 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
__lowercase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    model_type = '''albert'''

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ]
        )
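

# Editorial usage sketch (not in the original file): the values below are the
# documented albert-base-v2 sizes.
if __name__ == "__main__":
    config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
    print(config.model_type)  # albert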
| 85 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 88 | from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse('''0.17.0'''):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, '''_hf_hook''') and hasattr(self._hf_hook, '''pre_forward'''):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
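

# Editorial example (not in the original file): the decorator is a passthrough when
# accelerate is unavailable or too old; otherwise it fires any attached hook first.
if __name__ == "__main__":
    class _Demo:
        @apply_forward_hook
        def forward(self, x):
            return x * 2

    print(_Demo().forward(3))  # 6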
| 182 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
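

# Editorial usage sketch (the model id is illustrative; running it downloads weights):
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=2000).images[0]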
| 202 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self : List[Any] ):
_UpperCAmelCase : List[str] = tempfile.mkdtemp()
# fmt: off
_UpperCAmelCase : Union[str, Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_UpperCAmelCase : List[Any] = dict(zip(A , range(len(A ) ) ) )
_UpperCAmelCase : Union[str, Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_UpperCAmelCase : Optional[int] = {"unk_token": "<unk>"}
_UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(A ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(A ) )
_UpperCAmelCase : List[str] = {
"do_resize": True,
"size": 2_0,
"do_center_crop": True,
"crop_size": 1_8,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_UpperCAmelCase : Any = os.path.join(self.tmpdirname , A )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(A , A )
def snake_case_ ( self : List[Any] , **A : Union[str, Any] ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **A )
def snake_case_ ( self : int , **A : Any ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A )
def snake_case_ ( self : List[str] , **A : Optional[Any] ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **A )
def snake_case_ ( self : Optional[int] ):
shutil.rmtree(self.tmpdirname )
def snake_case_ ( self : str ):
_UpperCAmelCase : int = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_UpperCAmelCase : Dict = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : Dict = self.get_rust_tokenizer()
_UpperCAmelCase : int = self.get_image_processor()
_UpperCAmelCase : List[Any] = CLIPProcessor(tokenizer=A , image_processor=A )
processor_slow.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Optional[Any] = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A )
_UpperCAmelCase : Optional[Any] = CLIPProcessor(tokenizer=A , image_processor=A )
processor_fast.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A )
self.assertIsInstance(processor_fast.tokenizer , A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A )
self.assertIsInstance(processor_fast.image_processor , A )
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : List[str] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Any = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_UpperCAmelCase : Any = self.get_image_processor(do_normalize=A , padding_value=1.0 )
_UpperCAmelCase : Any = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def snake_case_ ( self : List[Any] ):
_UpperCAmelCase : str = self.get_image_processor()
_UpperCAmelCase : List[str] = self.get_tokenizer()
_UpperCAmelCase : Any = CLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Dict = self.prepare_image_inputs()
_UpperCAmelCase : Optional[int] = image_processor(A , return_tensors="np" )
_UpperCAmelCase : Any = processor(images=A , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case_ ( self : str ):
_UpperCAmelCase : Tuple = self.get_image_processor()
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : List[str] = CLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Optional[int] = "lower newer"
_UpperCAmelCase : Union[str, Any] = processor(text=A )
_UpperCAmelCase : Optional[int] = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : Union[str, Any] = self.get_image_processor()
_UpperCAmelCase : Tuple = self.get_tokenizer()
_UpperCAmelCase : str = CLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Tuple = "lower newer"
_UpperCAmelCase : Union[str, Any] = self.prepare_image_inputs()
_UpperCAmelCase : str = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def snake_case_ ( self : int ):
_UpperCAmelCase : List[str] = self.get_image_processor()
_UpperCAmelCase : Dict = self.get_tokenizer()
_UpperCAmelCase : List[Any] = CLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase : List[str] = processor.batch_decode(A )
_UpperCAmelCase : int = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def snake_case_ ( self : Optional[int] ):
_UpperCAmelCase : Optional[Any] = self.get_image_processor()
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : int = CLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : str = "lower newer"
_UpperCAmelCase : int = self.prepare_image_inputs()
_UpperCAmelCase : Optional[Any] = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 202 | 1 |
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    """simple docstring"""
    url = 'https://pypi.org/pypi/diffusers/json'
    releases = json.loads(request.urlopen(url).read())['releases'].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    """simple docstring"""
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / '__init__.py'
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name):
    """simple docstring"""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / '__init__.py'
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    """simple docstring"""
    with open(module_file, 'r', encoding='utf-8') as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall('^\s*import\s+\.(\S+)\s*$', content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall('^\s*from\s+\.(\S+)\s+import', content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
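

# Editorial example: for a module file containing "from .unet import UNet2DModel",
# get_relative_imports returns ["unet"]; the order of the de-duplicated list is arbitrary.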
def get_relative_import_files(module_file):
    """simple docstring"""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"""{f}.py""" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    """simple docstring"""
    with open(filename, 'r', encoding='utf-8') as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall('^\s*import\s+(\S+)\s*$', content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall('^\s*from\s+(\S+)\s+import', content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split('.')[0] for imp in imports if not imp.startswith('.')]
    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)
    if len(missing_packages) > 0:
        raise ImportError(
            'This modeling file requires the following packages that were not found in your environment: '
            f"""{", ".join(missing_packages)}. Run `pip install {" ".join(missing_packages)}`""")
    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """simple docstring"""
    module_path = module_path.replace(os.path.sep, '.')
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """simple docstring"""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split('.')[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
                    f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
                    f""" {loaded_module}.""")
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    """simple docstring"""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = 'local'
    elif pretrained_model_name_or_path.count('/') == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = 'v' + '.'.join(__version__.split('.')[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else 'main'
            logger.info(f"""Defaulting to latest_version: {revision}.""")
        elif revision in available_versions:
            revision = f"""v{revision}"""
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
                f""" {", ".join(available_versions + ["main"])}.""")

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = 'git'
            module_file = pretrained_model_name_or_path + '.py'
        except EnvironmentError:
            logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join('local', '--'.join(pretrained_model_name_or_path.split('/')))
        except EnvironmentError:
            logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"""{module_needed}.py"""
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"""{module_needed}.py""",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """simple docstring"""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace('.py', '')) | 286 |
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer( CLIPTokenizer ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        self.token_map = {}
    def try_adding_tokens( self , placeholder_token , *args , **kwargs ):
        """simple docstring"""
        num_added_tokens = super().add_tokens(placeholder_token , *args , **kwargs )
        if num_added_tokens == 0:
            raise ValueError(
                F"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
                ' `placeholder_token` that is not already in the tokenizer.' )
    def add_placeholder_tokens( self , placeholder_token , *args , num_vec_per_token=1 , **kwargs ):
        """simple docstring"""
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token , *args , **kwargs )
            output.append(placeholder_token )
        else:
            output = []
            for i in range(num_vec_per_token ):
                ith_token = placeholder_token + F"""_{i}"""
                self.try_adding_tokens(ith_token , *args , **kwargs )
                output.append(ith_token )
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    F"""The tokenizer already has placeholder token {token} that can get confused with"""
                    F""" {placeholder_token}; keep placeholder tokens independent.""" )
        self.token_map[placeholder_token] = output
    def replace_placeholder_tokens_in_text( self , text , vector_shuffle=False , prop_tokens_to_load=1.0 ):
        """simple docstring"""
        if isinstance(text , list ):
            output = []
            for i in range(len(text ) ):
                output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=vector_shuffle ) )
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens ) * prop_tokens_to_load )]
                if vector_shuffle:
                    tokens = copy.copy(tokens )
                    random.shuffle(tokens )
                text = text.replace(placeholder_token , ' '.join(tokens ) )
        return text
    def __call__( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        """simple docstring"""
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
    def encode( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        """simple docstring"""
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
| 286 | 1 |
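# A hedged usage sketch for the multi-vector placeholder tokenizer above. The
# checkpoint id and placeholder string are illustrative; the calls are commented
# out because they download tokenizer files from the Hub:
#
# tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
# tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
# # "<cat-toy>" is expanded to "<cat-toy>_0 ... <cat-toy>_3" before encoding:
# ids = tokenizer.encode("a photo of <cat-toy>")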
def is_isogram ( string: str ) -> bool:
    if not all(x.isalpha() for x in string ):
        raise ValueError("String must only contain alphabetic characters." )
    letters = sorted(string.lower() )
    return len(letters ) == len(set(letters ) )
if __name__ == "__main__":
    input_str = input("""Enter a string """).strip()
    isogram = is_isogram(input_str)
    print(F'{input_str} is {"an" if isogram else "not an"} isogram.')
| 367 |
from __future__ import annotations
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
return len(set(SCREAMING_SNAKE_CASE_ ) ) == len(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 224 | 0 |
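# Both snippets above rely on the same idiom: a sequence has no repeated
# elements exactly when converting it to a set preserves its length. A small
# self-contained check with illustrative values:
assert len(set("abc" ) ) == len("abc" )  # all characters unique
assert len(set("aabc" ) ) != len("aabc" )  # 'a' repeats
assert len(set([1, 2, 3] ) ) == len([1, 2, 3] )  # works for any hashable elements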
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = 0
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = Path(UpperCamelCase ) / """preprocessor_config.json"""
A__ = Path(UpperCamelCase ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(UpperCamelCase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(UpperCamelCase , """w""" ) )
A__ = AutoImageProcessor.from_pretrained(UpperCamelCase )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = Path(UpperCamelCase ) / """preprocessor_config.json"""
A__ = Path(UpperCamelCase ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(UpperCamelCase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(UpperCamelCase , """w""" ) )
A__ = AutoImageProcessor.from_pretrained(UpperCamelCase )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = CLIPConfig()
            # Create a dummy config file with image_processor_type
A__ = Path(UpperCamelCase ) / """preprocessor_config.json"""
A__ = Path(UpperCamelCase ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(UpperCamelCase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(UpperCamelCase , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
A__ = AutoImageProcessor.from_pretrained(UpperCamelCase ).to_dict()
config_dict.pop("""image_processor_type""" )
A__ = CLIPImageProcessor(**UpperCamelCase )
# save in new folder
model_config.save_pretrained(UpperCamelCase )
config.save_pretrained(UpperCamelCase )
A__ = AutoImageProcessor.from_pretrained(UpperCamelCase )
# make sure private variable is not incorrectly saved
A__ = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = Path(UpperCamelCase ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(UpperCamelCase , """w""" ) , )
A__ = AutoImageProcessor.from_pretrained(UpperCamelCase )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCamelCase , """clip-base is not a local folder and is not a valid model identifier""" ):
A__ = AutoImageProcessor.from_pretrained("""clip-base""" )
def UpperCamelCase ( self: str ):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCamelCase , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
A__ = AutoImageProcessor.from_pretrained(UpperCamelCase , revision="""aaaaaa""" )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCamelCase , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
A__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
with self.assertRaises(UpperCamelCase ):
A__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase ):
A__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=UpperCamelCase )
A__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=UpperCamelCase )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCamelCase )
A__ = AutoImageProcessor.from_pretrained(UpperCamelCase , trust_remote_code=UpperCamelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def UpperCamelCase ( self: str ):
"""simple docstring"""
try:
AutoConfig.register("""custom""" , UpperCamelCase )
AutoImageProcessor.register(UpperCamelCase , UpperCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase ):
AutoImageProcessor.register(UpperCamelCase , UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = Path(UpperCamelCase ) / """preprocessor_config.json"""
A__ = Path(UpperCamelCase ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(UpperCamelCase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(UpperCamelCase , """w""" ) )
A__ = CustomImageProcessor.from_pretrained(UpperCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCamelCase )
A__ = AutoImageProcessor.from_pretrained(UpperCamelCase )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = True
try:
AutoConfig.register("""custom""" , UpperCamelCase )
AutoImageProcessor.register(UpperCamelCase , UpperCamelCase )
# If remote code is not set, the default is to use local
A__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
A__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=UpperCamelCase )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
A__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=UpperCamelCase )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(UpperCamelCase , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 335 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a ( PipelineTool ):
    """simple docstring"""
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ["""vision"""] )
        super().__init__(*args , **kwargs )
    def encode( self , image: "Image" , question: str ):
        """simple docstring"""
        return self.pre_processor(image , question , return_tensors="""pt""" )
    def forward( self , inputs ):
        """simple docstring"""
        with torch.no_grad():
            return self.model(**inputs ).logits
    def decode( self , outputs ):
        """simple docstring"""
        idx = outputs.argmax(-1 ).item()
        return self.model.config.id2label[idx]
| 335 | 1 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson( func: str , a: float | Decimal , precision: float = 10**-10 ):
    x = a
    while True:
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) )  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision:  # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}""")
    # Find value of e (the root of log(x) - 1 = 0)
print(F"""The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}""")
# Exponential Roots
print(F"""The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}""")
| 363 |
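# A quick numerical sanity check for the Newton-Raphson routine above: starting
# from 2, the root of sin(x) should agree with pi to well within the default
# 1e-10 precision (this assumes the `newton_raphson` name used in the demo block):
from math import isclose, pi
assert isclose(newton_raphson("sin(x)", 2 ) , pi , abs_tol=1E-8 )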
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowerCamelCase = 250_004
lowerCamelCase = 250_020
@require_sentencepiece
@require_tokenizers
class lowercase__ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def lowercase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = MBartTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Any ) -> int:
'''simple docstring'''
UpperCAmelCase_ = MBartTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def lowercase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase_ = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = tokenizer_r.save_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = tokenizer_p.save_pretrained(_UpperCAmelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
UpperCAmelCase_ = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(_UpperCAmelCase , _UpperCAmelCase )
# Checks everything loads correctly in the same way
UpperCAmelCase_ = tokenizer_r.from_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = tokenizer_p.from_pretrained(_UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCAmelCase , _UpperCAmelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_UpperCAmelCase )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = tokenizer_r.save_pretrained(_UpperCAmelCase , legacy_format=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer_p.save_pretrained(_UpperCAmelCase )
# Checks it save with the same files
self.assertSequenceEqual(_UpperCAmelCase , _UpperCAmelCase )
# Checks everything loads correctly in the same way
UpperCAmelCase_ = tokenizer_r.from_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = tokenizer_p.from_pretrained(_UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCAmelCase , _UpperCAmelCase ) )
shutil.rmtree(_UpperCAmelCase )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = tokenizer_r.save_pretrained(_UpperCAmelCase , legacy_format=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer_p.save_pretrained(_UpperCAmelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase_ = tokenizer_r.from_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = tokenizer_p.from_pretrained(_UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCAmelCase , _UpperCAmelCase ) )
shutil.rmtree(_UpperCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
    checkpoint_name = '''facebook/mbart-large-en-ro'''
    src_text = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
        ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
    ]
    tgt_text = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
        '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
        ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
        ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
    ]
    expected_src_tokens = [82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2, EN_CODE]
@classmethod
def lowercase__ ( cls : Optional[Any] ) -> List[Any]:
'''simple docstring'''
        cls.tokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" )
        cls.pad_token_id = 1
return cls
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020 )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase )
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
self.assertIn(_UpperCAmelCase , self.tokenizer.all_special_ids )
UpperCAmelCase_ = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
UpperCAmelCase_ = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , _UpperCAmelCase )
def lowercase__ ( self : Tuple ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , _UpperCAmelCase )
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.tokenizer(_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , _UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [250026, 250001] )
def lowercase__ ( self : Any ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = MBartTokenizer.from_pretrained(_UpperCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _UpperCAmelCase )
@require_torch
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_UpperCAmelCase , return_tensors="pt" )
UpperCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def lowercase__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
UpperCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
UpperCAmelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer(self.src_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=3 , return_tensors="pt" )
UpperCAmelCase_ = self.tokenizer(
text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=10 , return_tensors="pt" )
UpperCAmelCase_ = targets["input_ids"]
UpperCAmelCase_ = shift_tokens_right(_UpperCAmelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , {
# A, test, EOS, en_XX
"input_ids": [[62, 3034, 2, 250004]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 250001,
} , )
| 241 | 0 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
__lowerCamelCase = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
__lowerCamelCase = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
__lowerCamelCase = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count ( message : str ):
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero ( x : tuple ):
    return x[0]
def get_frequency_order ( message : str ):
    letter_to_freq = get_letter_count(message )
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True )
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero , reverse=True )
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order )
def english_freq_match_score ( message : str ):
    freq_order = get_frequency_order(message )
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 59 |
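# A short runnable demonstration of the frequency helpers above: ordinary
# English prose should place common letters such as E and T near the front of
# the frequency order and score fairly high (the maximum match score is 12).
sample = "Hello, this is a perfectly ordinary English sentence for testing."
print(get_frequency_order(sample ) )
print(english_freq_match_score(sample ) )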
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
__lowerCamelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCamelCase = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
__lowerCamelCase = {
"""unc-nlp/lxmert-base-uncased""": 5_12,
}
__lowerCamelCase = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class UpperCAmelCase ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__(self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 59 | 1 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 357 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe ( grid : Matrix, row : int, column : int, n : int ):
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location ( grid : Matrix ):
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku ( grid : Matrix ):
    '''simple docstring'''
    if location := find_empty_location(grid ):
        row , column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10 ):
        if is_safe(grid, row, column, digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution ( grid : Matrix ):
    '''simple docstring'''
    for row in grid:
        for cell in row:
            print(cell, end=''' ''' )
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 2_0)
print_solution(example_grid)
print("""\nExample grid solution:""")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 35 | 0 |
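# A small consistency check for the backtracking solver above: a solved grid
# must contain no zeros and every row must hold each digit 1-9 exactly once.
solved = sudoku([row[:] for row in initial_grid] )
assert solved is not None
assert all(sorted(row ) == list(range(1, 10 ) ) for row in solved )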
'''simple docstring'''
from typing import Any
def viterbi( observations_space: list ,states_space: list ,initial_probabilities: dict ,transition_probabilities: dict ,emission_probabilities: dict ,):
    """simple docstring"""
    _validation(
        observations_space ,states_space ,initial_probabilities ,transition_probabilities ,emission_probabilities ,)
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 ,len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ''
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space ) - 1]
    # argmax for given final observation
    arg_max = ''
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1 ,-1 ,-1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation( observations_space: Any ,states_space: Any ,initial_probabilities: Any ,transition_probabilities: Any ,emission_probabilities: Any ,):
    """simple docstring"""
    _validate_not_empty(
        observations_space ,states_space ,initial_probabilities ,transition_probabilities ,emission_probabilities ,)
    _validate_lists(observations_space ,states_space )
    _validate_dicts(
        initial_probabilities ,transition_probabilities ,emission_probabilities )
def _validate_not_empty( observations_space: Any ,states_space: Any ,initial_probabilities: Any ,transition_probabilities: Any ,emission_probabilities: Any ,):
"""simple docstring"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('There\'s an empty parameter' )
def _validate_lists( observations_space: Any ,states_space: Any ):
    """simple docstring"""
    _validate_list(observations_space ,'observations_space' )
    _validate_list(states_space ,'states_space' )
def _validate_list( _object: Any ,var_name: str ):
    """simple docstring"""
    if not isinstance(_object ,list ):
        msg = f"{var_name} must be a list"
        raise ValueError(msg )
    else:
        for x in _object:
            if not isinstance(x ,str ):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg )
def _validate_dicts( initial_probabilities: Any ,transition_probabilities: Any ,emission_probabilities: Any ,):
    """simple docstring"""
    _validate_dict(initial_probabilities ,'initial_probabilities' ,float )
    _validate_nested_dict(transition_probabilities ,'transition_probabilities' )
    _validate_nested_dict(emission_probabilities ,'emission_probabilities' )
def _validate_nested_dict( _object: Any ,var_name: str ):
    """simple docstring"""
    _validate_dict(_object ,var_name ,dict )
    for x in _object.values():
        _validate_dict(x ,var_name ,float ,True )
def _validate_dict( _object: Any ,var_name: str ,value_type: type ,nested: bool = False ):
    """simple docstring"""
    if not isinstance(_object ,dict ):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg )
    if not all(isinstance(x ,str ) for x in _object ):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg )
    if not all(isinstance(x ,value_type ) for x in _object.values() ):
        nested_text = 'nested dictionary ' if nested else ''
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 251 |
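# A tiny worked example for the Viterbi implementation above, using the classic
# healthy/fever HMM; the probabilities are illustrative values only.
observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
start_p = {"Healthy": 0.6, "Fever": 0.4}
trans_p = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emit_p = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
# Expected most likely state sequence: ['Healthy', 'Healthy', 'Fever']
print(viterbi(observations, states, start_p, trans_p, emit_p ) )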
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class _a ( PretrainedConfig ):
'''simple docstring'''
    model_type = '''openai-gpt'''
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__( self, vocab_size=40_478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn="gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1E-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
| 251 | 1 |
import os
from pathlib import Path
def load_cuda_kernels( ):
    from torch.utils.cpp_extension import load
    root = Path(__file__ ).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
    src_files = [
        root / filename
        for filename in [
            '''vision.cpp''',
            os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
            os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
        ]
    ]
    load(
        '''MultiScaleDeformableAttention''' , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
            '''-DCUDA_HAS_FP16=1''',
            '''-D__CUDA_NO_HALF_OPERATORS__''',
            '''-D__CUDA_NO_HALF_CONVERSIONS__''',
            '''-D__CUDA_NO_HALF2_OPERATORS__''',
        ] , )
    import MultiScaleDeformableAttention as MSDA
    return MSDA
| 366 |
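# A guarded usage sketch for the kernel loader above: compiling the extension
# needs a CUDA toolchain, so callers typically wrap the call and fall back to
# the pure-PyTorch attention path when the build is unavailable:
#
# try:
#     MSDA = load_cuda_kernels()
# except Exception:
#     MSDA = None  # fall back to the PyTorch implementation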
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 301 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_groupvit'''] = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_groupvit'''] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 320 |
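# The lazy-import pattern above defers heavy submodule imports until first
# attribute access. A minimal self-contained illustration of the same idea,
# without transformers' _LazyModule helper:
import importlib
def lazy_attr(module_name: str , attr: str ):
    # resolve `module_name.attr` only when it is actually requested
    return getattr(importlib.import_module(module_name ) , attr )
print(lazy_attr("math" , "sqrt" )(9.0 ) )  # 3.0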
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class __lowerCamelCase :
'''simple docstring'''
def __init__( self ) -> Tuple:
        self.graph = {}
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> int:
if self.graph.get(__UpperCAmelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
_a = [[w, v]]
if not self.graph.get(__UpperCAmelCase ):
_a = []
def _UpperCAmelCase ( self ) -> int:
return list(self.graph )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]:
if s == d:
return []
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple:
if c == -1:
_a = floor(random() * 10000 ) + 10
for i in range(__UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_a = floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[str]:
_a = deque()
_a = []
if s == -2:
_a = list(self.graph )[0]
d.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
while d:
_a = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Tuple:
_a = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict:
return len(self.graph[u] )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple:
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = s
_a = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return sorted_nodes
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return list(__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Any:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return False
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]:
_a = time()
self.dfs(__UpperCAmelCase , __UpperCAmelCase )
_a = time()
return end - begin
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Optional[Any]:
_a = time()
self.bfs(__UpperCAmelCase )
_a = time()
return end - begin
class __lowerCamelCase :
'''simple docstring'''
def __init__( self ) -> Optional[int]:
        self.graph = {}
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> Dict:
# check if the u exists
if self.graph.get(__UpperCAmelCase ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
_a = [[w, v]]
# add the other way
if self.graph.get(__UpperCAmelCase ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
_a = [[w, u]]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__UpperCAmelCase )
# the other way round
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Dict:
if s == d:
return []
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple:
if c == -1:
_a = floor(random() * 10000 ) + 10
for i in range(__UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_a = floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[Any]:
_a = deque()
_a = []
if s == -2:
_a = list(self.graph )[0]
d.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
while d:
_a = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict:
return len(self.graph[u] )
def _UpperCAmelCase ( self ) -> int:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return list(__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return False
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return list(self.graph )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Tuple:
_a = time()
self.dfs(__UpperCAmelCase , __UpperCAmelCase )
_a = time()
return end - begin
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple:
_a = time()
self.bfs(__UpperCAmelCase )
_a = time()
        return end - begin
| 320 | 1 |
def solution ( n : int = 1_00 ) -> int:
    """simple docstring"""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f"{solution() = }")
| 125 |
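# A brute-force cross-check of the closed-form Project Euler 6 solution above,
# practical for small n only (the helper below is purely illustrative):
def _brute_force(n: int ) -> int:
    return sum(range(1, n + 1 ) ) ** 2 - sum(i * i for i in range(1, n + 1 ) )
assert _brute_force(10 ) == 2_640  # documented example value
assert _brute_force(100 ) == solution()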
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __UpperCAmelCase (SchedulerCommonTest ):
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config( self , **kwargs ):
        '''simple docstring'''
        config = {
            """num_train_timesteps""": 1_000,
            """variance_type""": """fixed_small_log""",
            """clip_sample""": True,
            """clip_sample_range""": 1.0,
            """prediction_type""": """epsilon""",
        }
        config.update(**kwargs )
        return config
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=UpperCAmelCase_ )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=UpperCAmelCase_ )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=UpperCAmelCase_ , prev_timestep=UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE = self.get_scheduler_config(variance_type="""fixed_small_log""" )
_SCREAMING_SNAKE_CASE = scheduler_class(**UpperCAmelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1E-5
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE = self.get_scheduler_config(variance_type="""learned_range""" )
_SCREAMING_SNAKE_CASE = scheduler_class(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = 0.5
assert scheduler._get_variance(1 , predicted_variance=UpperCAmelCase_ ) - -10.1_71_27_90 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=UpperCAmelCase_ ) - -5.7_99_80_52 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=UpperCAmelCase_ ) - -0.0_01_00_11 < 1E-5
    def test_full_loop(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3
    def test_full_loop_skip_timesteps(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: str ):
'''simple docstring'''
pass
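
# --- Illustrative sketch (not part of the test suite). `toy_model` is a hypothetical
# stand-in for a trained epsilon-predicting UNet; it shows the bare denoising loop the
# tests above exercise, reusing the torch / UnCLIPScheduler imports at the top of file.
def _unclip_denoising_demo():
    def toy_model(sample, t):
        return torch.zeros_like(sample)  # pretend noise prediction

    scheduler = UnCLIPScheduler(num_train_timesteps=1_000, variance_type="fixed_small_log")
    sample = torch.randn(1, 3, 8, 8)
    generator = torch.manual_seed(0)
    for t in scheduler.timesteps:
        residual = toy_model(sample, t)
        sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
    return sample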
| 125 | 1 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_token_type_ids=False, return_length=False, verbose=True, return_tensors=None, **kwargs, ):
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")
        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''')
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, 'qformer_tokenizer')
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder='qformer_tokenizer')
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
| 96 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    lowerCamelCase__ = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('DownBlock2D', 'AttnDownBlock2D'), up_block_types=('AttnUpBlock2D', 'UpBlock2D'), )
        scheduler = DDIMScheduler()
        components = {'unet': unet, 'scheduler': scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_inference(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    def test_inference_cifar10(self):
        model_id = 'google/ddpm-cifar10-32'
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_ema_bedroom(self):
        model_id = 'google/ddpm-ema-bedroom-256'
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 96 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 12
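
# --- Illustrative usage sketch (not part of the original module): build a small
# config and inspect the ONNX export metadata defined above.
if __name__ == "__main__":
    tiny_config = YolosConfig(hidden_size=192, num_hidden_layers=4, num_attention_heads=3)
    onnx_config = YolosOnnxConfig(tiny_config)
    print(onnx_config.inputs)               # OrderedDict([('pixel_values', {0: 'batch', ...})])
    print(onnx_config.atol_for_validation)  # 1e-4
    print(onnx_config.default_onnx_opset)   # 12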
| 353 | """simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r'\b(a|an|the)\b', re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
    parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')
    parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')
    parser.add_argument(
        '--out-file', '-o', metavar='eval.json', help='Write accuracy metrics to file (default is stdout).')
    parser.add_argument(
        '--na-prob-file', '-n', metavar='na_prob.json', help='Model estimates of probability of no answer.')
    parser.add_argument(
        '--na-prob-thresh', '-t', type=float, default=1.0, help='Predict "" if no-answer probability exceeds this (default = 1.0).', )
    parser.add_argument(
        '--out-image-dir', '-p', metavar='out_images', default=None, help='Save precision-recall curves to directory.')
    parser.add_argument('--verbose', '-v', action='store_true')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa['id']] = bool(qa['answers']['text'])
    return qid_to_has_ans
def normalize_answer(s):
    def remove_articles(text):
        return ARTICLES_REGEX.sub(' ', text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_fa(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
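
# Worked example (illustrative): gold "the cat sat" vs. prediction "cat sat down".
# normalize_answer strips the article, so gold tokens are ["cat", "sat"] and prediction
# tokens are ["cat", "sat", "down"]; the overlap is 2, so precision = 2/3, recall = 2/2,
# and F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.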
def get_raw_scores(dataset, preds):
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa['id']
                gold_answers = [t for t in qa['answers']['text'] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = ['']
                if qid not in preds:
                    print(F'Missing prediction for {qid}')
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores.values()) / total),
                ('f1', 100.0 * sum(fa_scores.values()) / total),
                ('total', total),
            ])
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ('f1', 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ('total', total),
            ])
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[F'{prefix}_{k}'] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color='b', alpha=0.2, where='post')
    plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, 'pr_exact.png'), title='Precision-Recall curve for Exact Match score', )
    pr_fa = make_precision_recall_eval(
        fa_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, 'pr_f1.png'), title='Precision-Recall curve for F1 score', )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, 'pr_oracle.png'), title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)', )
    merge_eval(main_eval, pr_exact, 'pr_exact')
    merge_eval(main_eval, pr_fa, 'pr_f1')
    merge_eval(main_eval, pr_oracle, 'pr_oracle')
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel('Model probability of no-answer')
    plt.ylabel('Proportion of dataset')
    plt.title(F'Histogram of no-answer probability: {name}')
    plt.savefig(os.path.join(image_dir, F'na_prob_hist_{name}.png'))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
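
# Intuition (illustrative): qids are scanned in increasing no-answer probability. The
# running score starts at the count of unanswerable questions (a threshold of 0 means
# abstaining on everything), gains for answerable questions answered below the
# threshold, and loses 1 for each non-empty guess on an unanswerable question;
# best_thresh is the na_prob at which that running score peaks.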
def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval['best_exact'] = best_exact
    main_eval['best_exact_thresh'] = exact_thresh
    main_eval['best_f1'] = best_fa
    main_eval['best_f1_thresh'] = fa_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json['data']
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, 'HasAns')
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, 'NoAns')
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, 'hasAns')
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, 'noAns')
    if OPTS.out_file:
        with open(OPTS.out_file, 'w') as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
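# Example invocation (hypothetical file names; flags match the parser defined above):
#   python evaluate_squad_v2.py data/dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json --na-prob-thresh 0.5 --out-file eval.json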
| 203 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
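
# Illustrative effect (assumption: this file lives inside the transformers package as a
# models/.../mctct/__init__.py): `from <package>.mctct import MCTCTConfig` resolves the
# config eagerly on first attribute access, while torch-backed symbols such as
# MCTCTModel are only imported when actually touched, keeping package import cheap.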
| 158 |
'''simple docstring'''
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    '''simple docstring'''
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments
def compound_interest(principal: float, nominal_annual_interest_rate_percentage: float, number_of_compounding_periods: float, ) -> float:
    '''simple docstring'''
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def apr_interest(principal: float, nominal_annual_percentage_rate: float, number_of_years: float, ) -> float:
    '''simple docstring'''
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365)
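
# Worked examples (illustrative):
#   simple_interest(500, 0.001, 10)  -> 5.0      (500 * 0.001 * 10)
#   compound_interest(100, 0.05, 2)  -> 10.25    (100 * (1.05 ** 2 - 1))
#   apr_interest(100, 0.05, 1)       -> ~5.1267  (daily compounding over 365 periods)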
if __name__ == "__main__":
import doctest
doctest.testmod()
| 158 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ['''EfficientNetImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 74 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    """simple docstring"""

    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )
        return config, pixel_values
    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest('''JIT Enabled'''):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('''JIT Disabled'''):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''google/vit-base-patch16-224''')
            outputs = model(np.ones((1, 3, 2_2_4, 2_2_4)))
            self.assertIsNotNone(outputs)
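
# --- Illustrative sketch (not part of the test suite; requires flax to be installed).
# Runs a tiny randomly-initialized FlaxViTModel forward pass outside the test harness.
def _flax_vit_demo():
    config = ViTConfig(
        image_size=32, patch_size=4, num_channels=3, hidden_size=32,
        num_hidden_layers=2, num_attention_heads=2, intermediate_size=64,
    )
    model = FlaxViTModel(config)
    outputs = model(np.ones((1, 3, 32, 32), dtype="float32"))
    # seq length = (32 // 4) ** 2 patches + 1 [CLS] token = 65
    assert outputs.last_hidden_state.shape == (1, 65, 32)
    return outputs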
| 74 | 1 |
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = """pytorch_model.bin"""
@dataclasses.dataclass
class UpperCamelCase__ :
"""simple docstring"""
    model_name_or_path: str = dataclasses.field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models."""} )
    cache_dir: Optional[str] = dataclasses.field(
        default=None, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co."""}, )
@dataclasses.dataclass
class UpperCamelCase__ :
"""simple docstring"""
_SCREAMING_SNAKE_CASE = dataclasses.field(metadata={"""help""": """A csv or a json file containing the training data."""} )
_SCREAMING_SNAKE_CASE = dataclasses.field(metadata={"""help""": """A csv or a json file containing the data to predict on."""} )
_SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowercase_, metadata={"""help""": """A csv or a json file containing the validation data."""} )
_SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowercase_, metadata={"""help""": """The name of the task to train on."""}, )
_SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowercase_, metadata={"""help""": """The list of labels for the task."""} )
@dataclasses.dataclass
class UpperCamelCase__ :
"""simple docstring"""
    output_dir: str = dataclasses.field(
        metadata={"""help""": """The output directory where the model predictions and checkpoints will be written."""} )
    eval_metric: Optional[str] = dataclasses.field(
        default="""accuracy""", metadata={"""help""": """The evaluation metric used for the task."""} )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="""no""", metadata={
            """help""": """The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch\"]"""
        }, )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10, metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""}, )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={
            """help""": """How much the specified evaluation metric must improve to satisfy early stopping conditions."""
        }, )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False, metadata={"""help""": """Whether to filter the pseudo-labeled data based on the confidence score."""}, )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False, metadata={"""help""": """Whether to filter the pseudo-labeled data based on the validation performance."""}, )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False, metadata={"""help""": """Whether to fine-tune on labeled data after pseudo training."""}, )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={"""help""": """Confidence threshold for pseudo-labeled data filtering."""}, )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100, metadata={"""help""": """Maximum number of self-training iterations to run."""}, )
    seed: Optional[int] = dataclasses.field(
        default=None, metadata={"""help""": """Random seed for initialization."""}, )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """simple docstring"""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort('probability', reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(['label', 'probability'])
    dataset = dataset.rename_column('prediction', 'label')
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
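
# Illustrative flow: with do_filter_by_confidence and confidence_threshold=0.9, a row
# such as {"prediction": 1, "probability": 0.95} survives filtering, has "prediction"
# renamed to "label" and mapped through id2label, and is written to
# train_pseudo.<ext> as training data for the next self-training iteration.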
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """simple docstring"""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )
    logger.info(accelerator.state)
    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)
    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None
    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files['train'] = args.train_file
    data_files['infer'] = args.infer_file
    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files['eval'] = args.eval_file
    for key in data_files:
        extension = data_files[key].split('.')[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."
assert (
args.eval_metric in datasets.list_metrics()
), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
    logger.info('Creating the initial data directory for self-training...')
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    current_data_dir = data_dir_format(0)
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
        os.makedirs(current_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()
    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)
    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
        current_output_dir = os.path.join(current_data_dir, 'stage-1')
        arguments_dict = {
'accelerator': accelerator,
'model_name_or_path': args.model_name_or_path,
'cache_dir': args.cache_dir,
'do_train': True,
'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'],
'do_eval': True if args.eval_file is not None else False,
'eval_file': data_files['eval'],
'do_predict': True,
'infer_file': data_files['infer'],
'task_name': args.task_name,
'label_list': args.label_list,
'output_dir': current_output_dir,
'eval_metric': args.eval_metric,
'evaluation_strategy': args.evaluation_strategy,
'early_stopping_patience': args.early_stopping_patience,
'early_stopping_threshold': args.early_stopping_threshold,
'seed': args.seed,
}
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, 'best-checkpoint', MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.', model_bin_file_path, iteration, )
        else:
            logger.info('***** Running self-training: iteration: %d, stage: 1 *****', iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info('Self-training job completed: iteration: %d, stage: 1.', iteration)
        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, 'best-checkpoint')
            current_output_dir = os.path.join(current_data_dir, 'stage-2')
            # Update arguments_dict
            arguments_dict['model_name_or_path'] = model_path
            arguments_dict['train_file'] = data_files['train']
            arguments_dict['output_dir'] = current_output_dir
            model_bin_file_path = os.path.join(current_output_dir, 'best-checkpoint', MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.', model_bin_file_path, iteration, )
            else:
                logger.info('***** Running self-training: iteration: %d, stage: 2 *****', iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info('Self-training job completed: iteration: %d, stage: 2.', iteration)
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, 'best-checkpoint'))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, 'eval_results_best-checkpoint.json')
        test_results_file = os.path.join(current_output_dir, 'test_results_best-checkpoint.json')
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, 'r') as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, 'infer_output_best-checkpoint.csv')
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={'data': data_files['infer']})['data']
        infer_output = load_dataset('csv', data_files={'data': infer_output_file})['data']

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files['train_pseudo'] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break
    if best_iteration is not None:
        # Save the best iteration
        logger.info('Best iteration: %d', best_iteration)
        logger.info('Best evaluation result: %s = %f', args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{best_iteration}.json"), os.path.join(output_dir, 'eval_results_best-iteration.json'), )
    else:
        # Assume that the last iteration is the best
        logger.info('Best iteration: %d', args.max_selftrain_iterations - 1)
        logger.info('Best evaluation result: %s = %f', args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"), os.path.join(output_dir, 'eval_results_best-iteration.json'), )
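
# Example driver (illustrative; paths are hypothetical, and any extra training
# arguments are forwarded through **kwargs and merged into `args` above):
#   selftrain(
#       "bert-base-uncased", "data/train.csv", "data/unlabeled.csv", "output",
#       eval_file="data/eval.csv", evaluation_strategy="epoch", max_selftrain_iterations=3,
#   )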
| 224 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """simple docstring"""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
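
# Example usage (hypothetical paths): halve a checkpoint in place, or write a copy:
#   convert("pytorch_model.bin")                              # overwrites the source
#   convert("pytorch_model.bin", save_path="model.fp16.bin")  # writes a new file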
if __name__ == "__main__":
fire.Fire(convert)
| 224 | 1 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup


def world_covidaa_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(url).text, """html.parser""")
    keys = soup.findAll("""h1""")
    values = soup.findAll("""div""", {"""class""": """maincounter-number"""})
    keys += soup.findAll("""span""", {"""class""": """panel-title"""})
    values += soup.findAll("""div""", {"""class""": """number-table-main"""})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
for key, value in world_covidaa_stats().items():
print(F'''{key}\n{value}\n''')
| 352 |
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data):
        self.data = data
        self.next = None
def __repr__( self ):
return f'Node({self.data})'
class LinkedList:
    def __init__(self):
        self.head = None
def __iter__( self ):
        node = self.head
        while node:
            yield node.data
            node = node.next
def __len__( self ):
return sum(1 for _ in self )
def __repr__( self ):
return "->".join([str(SCREAMING_SNAKE_CASE_ ) for item in self] )
    def __getitem__(self, index):
if not 0 <= index < len(self ):
raise ValueError("""list index out of range.""" )
for i, node in enumerate(self ):
if i == index:
return node
return None
    def __setitem__(self, index, data):
        if not 0 <= index < len(self):
            raise ValueError("""list index out of range.""")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data
    def insert_tail(self, data):
        self.insert_nth(len(self), data)

    def insert_head(self, data):
        self.insert_nth(0, data)
    def insert_nth(self, index, data):
        if not 0 <= index <= len(self):
            raise IndexError("""list index out of range""")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list(self):  # print every node data
print(self )
    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)
    def delete_nth(self, index=0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("""List index out of range.""")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty(self):
        return self.head is None
    def reverse(self):
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    '''simple docstring'''
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(1_0):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 1_1))

    linked_list.insert_head(0)
    linked_list.insert_tail(1_1)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 1_2))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 1_0
    assert linked_list.delete_tail() == 1_1
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 1_0))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    '''simple docstring'''
    test_input = [
        -9,
        1_0_0,
        Node(7_7_3_4_5_1_1_2),
        """dlrow olleH""",
        7,
        5_5_5_5,
        0,
        -192.55555,
        """Hello, world!""",
        77.9,
        Node(1_0),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(1_0)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("""Hello again, world!"""))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
'''simple docstring'''
from doctest import testmod
testmod()
    linked_list = LinkedList()
linked_list.insert_head(input("""Inserting 1st at head """ ).strip() )
linked_list.insert_head(input("""Inserting 2nd at head """ ).strip() )
print("""\nPrint list:""" )
linked_list.print_list()
linked_list.insert_tail(input("""\nInserting 1st at tail """ ).strip() )
linked_list.insert_tail(input("""Inserting 2nd at tail """ ).strip() )
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nDelete head""" )
linked_list.delete_head()
print("""Delete tail""" )
linked_list.delete_tail()
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nReverse linked list""" )
linked_list.reverse()
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nString representation of linked list:""" )
    print(linked_list)
print("""\nReading/changing Node data using indexing:""" )
print(f'Element at Position 1: {linked_list[1]}' )
    linked_list[1] = input("""Enter New Value: """ ).strip()
print("""New list:""" )
    print(linked_list)
    print(f'length of linked_list is : {len(linked_list)}' )
if __name__ == "__main__":
main()
| 27 | 0 |
def nor_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int(input_1 == input_2 == 0)
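# Quick check (added sketch): NOR is 1 only when both inputs are 0.
assert (nor_gate(0, 0), nor_gate(0, 1), nor_gate(1, 0), nor_gate(1, 1)) == (1, 0, 0, 0)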
def main() -> None:
"""simple docstring"""
print("Truth Table of NOR Gate:" )
print("| Input 1 | Input 2 | Output |" )
print(F"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
print(F"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
print(F"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
print(F"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 130 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
UpperCAmelCase_ : Optional[Any] = datasets.logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
    pages = {45--52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
UpperCAmelCase_ : Tuple = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line, with all the annotations for this word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
UpperCAmelCase_ : Union[str, Any] = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions from the key or system files,
mentions whose corresponding coreference chain is of size one
are considered singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    """simple docstring"""
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    """simple docstring"""
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    """simple docstring"""
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''')),
'''references''': datasets.Sequence(datasets.Value('''string''')),
}) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        '''simple docstring'''
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
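# Minimal usage sketch (added; assumes the `coval` dependency is installed and
# `words` is a list of CoNLL-formatted lines as in the docstring example above):
#     coval_metric = datasets.load_metric("coval")
#     results = coval_metric.compute(predictions=[words], references=[words])
#     print(results["conll_score"])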
| 91 | 0 |
"""simple docstring"""
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """simple docstring"""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
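# Bezout check (added sketch): 10 * (-1) + 6 * 2 == 2 == gcd(10, 6)
assert extended_euclid(10, 6) == (-1, 2)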
def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """simple docstring"""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo(a: int, n: int) -> int:
    """simple docstring"""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """simple docstring"""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
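# Worked example (added sketch): x = 31 is the unique solution in [0, 35)
# of x % 5 == 1 and x % 7 == 3, and both constructions above agree on it.
assert chinese_remainder_theorem(5, 1, 7, 3) == 31
assert chinese_remainder_theorem2(5, 1, 7, 3) == 31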
if __name__ == "__main__":
from doctest import testmod
testmod(name='chinese_remainder_theorem', verbose=True)
testmod(name='chinese_remainder_theorem2', verbose=True)
testmod(name='invert_modulo', verbose=True)
testmod(name='extended_euclid', verbose=True) | 371 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swiftformer'] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 303 | 0 |
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 times using --repeat-times 3, and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
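# Illustration (added sketch): the variation dimensions are combined with a
# cartesian product, exactly what the code below does via itertools.product:
#     >>> import itertools
#     >>> list(itertools.product(["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]))
#     [('--tf32 0', ''), ('--tf32 0', '--fp16'), ('--tf32 0', '--bf16'),
#      ('--tf32 1', ''), ('--tf32 1', '--fp16'), ('--tf32 1', '--bf16')]
#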
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
__a = float('nan')
class Tee:
    """simple docstring"""

    def __init__(self, filename):
        """simple docstring"""
        self.stdout = sys.stdout
        self.file = open(filename, "a")
def __getattr__( self : List[Any] , lowerCAmelCase__ : Dict ) -> List[Any]:
"""simple docstring"""
return getattr(self.stdout , lowerCAmelCase__ )
    def write(self, msg):
        """simple docstring"""
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"""{key}={val}""")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"""{cmd.pop(0)} """
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f""" --output_dir {output_dir}"""

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.222222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"""log.{prefix}.stdout.txt""", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"""log.{prefix}.stderr.txt""", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"""{output_dir}/all_results.json""", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"""{id}: {variation:<{longest_variation_len}}"""
    outcome = f"""{preamble}: """
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"""\33[2K\r{outcome}"""
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"""{outcome} {mean_target}"""
        if len(results) > 1:
            results_str += f""" {tuple(round(x, 2) for x in results)}"""
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
return f"""
Datetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=a_, floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=a_, floatfmt=".2f" )]
    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd", default=None, type=str, required=True, help="Base cmd", )
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True, help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'", )
    parser.add_argument(
        "--base-variation", default=None, type=str, help="Baseline variation to compare to. if None the minimal target value will be used to compare against", )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True, help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second", )
    parser.add_argument(
        "--report-metric-keys", default="", type=str, help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples", )
    parser.add_argument(
        "--repeat-times", default=1, type=int, help="How many times to re-run each variation - an average will be reported", )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str, help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked", )
    parser.add_argument(
        "--verbose", default=False, action="store_true", help="Whether to show the outputs of each run or just the benchmark progress", )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"""benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"""
    print(f"""\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt""")
    print(f"""and this script's output is also piped into {report_fn}""")

    sys.stdout = Tee(report_fn)

    print(f"""\n*** Running {len(variations)} benchmarks:""")
    print(f"""Base command: {' '.join(base_cmd)}""")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main() | 145 | '''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_small_integration_test(self):
"""simple docstring"""
_UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" , return_dict=lowerCAmelCase__ ).to(lowerCAmelCase__ )
_UpperCAmelCase : str = AutoTokenizer.from_pretrained("google/mt5-small" )
_UpperCAmelCase : str = tokenizer("Hello there" , return_tensors="pt" ).input_ids
_UpperCAmelCase : str = tokenizer("Hi I am" , return_tensors="pt" ).input_ids
_UpperCAmelCase : Any = model(input_ids.to(lowerCAmelCase__ ) , labels=labels.to(lowerCAmelCase__ ) ).loss
_UpperCAmelCase : Dict = -(labels.shape[-1] * loss.item())
_UpperCAmelCase : Any = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 ) | 145 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    '''simple docstring'''
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )
def main() -> None:
    '''simple docstring'''
    scores = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
    height = math.log(len(scores), 2)
    print(f"""Optimal value : {minimax(0, 0, True, scores, height)}""")
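# Sanity check (added sketch): a 4-leaf tree where the maximizing root picks
# the better of two minimizing subtrees, i.e. max(min(3, 5), min(2, 9)) == 3.
assert minimax(0, 0, True, [3, 5, 2, 9], math.log(4, 2)) == 3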
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 106 | '''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    '''simple docstring'''
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
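# (added note) Equality above is tested with both `name` fields blanked out,
# so two initializers holding identical tensors still match even when their
# graph names differ; the names are restored before returning.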
def _node_replace_input_with(node_proto, name, new_name):
    '''simple docstring'''
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    '''simple docstring'''
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    '''simple docstring'''
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    '''simple docstring'''
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
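# Usage sketch (added; the path is illustrative):
#     optimized_path = remove_dup_initializers("path/to/model.onnx")
#     # writes "optimized_model.onnx" next to the input and returns its path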
| 106 | 1 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = self.get_config()
return config, pixel_values
def lowerCamelCase ( self ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model(self, config, pixel_values):
        '''simple docstring'''
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification(self, config, pixel_values):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase ,__lowerCamelCase = config_and_inputs
__lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = FlaxRegNetModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase ( self ):
'''simple docstring'''
return
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(__UpperCAmelCase )
__lowerCamelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
__lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = model_class(__UpperCAmelCase )
@jax.jit
            def model_jitted(pixel_values, **kwargs):
                return model(pixel_values=pixel_values, **kwargs)
with self.subTest('''JIT Enabled''' ):
__lowerCamelCase = model_jitted(**__UpperCAmelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__lowerCamelCase = model_jitted(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) )
for jitted_output, output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
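    # (added note) jax.jit traces the forward pass once and compiles it with
    # XLA; the test above only asserts that jitted and eager outputs agree in
    # shape, since bitwise equality is not guaranteed across compilation.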
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        '''simple docstring'''
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 330 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
a_ = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
a_ = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
a_ = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
a_ = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
a_ = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
return k
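# Example (added sketch): with DECODER_PATTERNS, the TF name
# "pegasus/decoder/layer_0/kernel" becomes "model.decoder.layers.0.weight"
# after the successive (tf_name, hf_name) replacements above.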
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict):
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], F"""no matches found for the following tf keys {extra}"""
    return torch_model
def get_tf_weights_as_numpy(path):
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
a_ = parser.parse_args()
a_ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 330 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
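# (added note) e.g. "upernet-convnext-tiny" maps to depths [3, 3, 9, 3] with
# hidden sizes [96, 192, 384, 768], while the larger variants deepen stage 3
# and widen the auxiliary head; all variants use the 150 ADE20K labels.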
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.{j}.gamma""", f"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.norm.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.norm.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
if i > 0:
rename_keys.append((f"""backbone.downsample_layers.{i}.0.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.0.bias""", f"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.1.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.1.bias""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
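# (added note) rename_key mutates the checkpoint dict in place: the old entry
# is popped and re-inserted under the HF key produced by create_rename_keys.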
def a__ ( __lowercase , __lowercase , __lowercase ) -> Optional[Any]:
_A = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
_A = model_name_to_url[model_name]
_A = torch.hub.load_state_dict_from_url(__lowercase , map_location="cpu" )["state_dict"]
_A = get_upernet_config(__lowercase )
_A = UperNetForSemanticSegmentation(__lowercase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_A = state_dict.pop(__lowercase )
if "bn" in key:
_A = key.replace("bn" , "batch_norm" )
_A = val
# rename keys
_A = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
model.load_state_dict(__lowercase )
# verify on image
_A = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
_A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ).convert("RGB" )
_A = SegformerImageProcessor()
_A = processor(__lowercase , return_tensors="pt" ).pixel_values
with torch.no_grad():
_A = model(__lowercase )
if model_name == "upernet-convnext-tiny":
_A = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] )
elif model_name == "upernet-convnext-small":
_A = torch.tensor(
[[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]] )
elif model_name == "upernet-convnext-base":
_A = torch.tensor(
[[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]] )
elif model_name == "upernet-convnext-large":
_A = torch.tensor(
[[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]] )
elif model_name == "upernet-convnext-xlarge":
_A = torch.tensor(
[[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]] )
print("Logits:" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __lowercase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowercase )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(__lowercase )
if push_to_hub:
print(f"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(f"""openmmlab/{model_name}""" )
processor.push_to_hub(f"""openmmlab/{model_name}""" )
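# --- Added illustration (not part of the original script) ---------------------
# `rename_key` is a plain dict transform, so the remapping can be sanity-checked
# without downloading any checkpoint; `_toy_state` is a hypothetical name
# introduced only for this sketch.
_toy_state = {"backbone.norm0.weight": 0}
rename_key(_toy_state , "backbone.norm0.weight" , "backbone.hidden_states_norms.stage1.weight" )
assert "backbone.hidden_states_norms.stage1.weight" in _toy_state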
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 163 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig ( PretrainedConfig):
    model_type = 'ctrl'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , vocab_size=24_65_34 , n_positions=2_56 , n_embd=12_80 , dff=81_92 , n_layer=48 , n_head=16 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1E-6 , initializer_range=0.0_2 , use_cache=True , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs ) | 163 | 1 |
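# --- Added usage sketch (illustrative; assumes transformers is installed) -------
# `attribute_map` lets the canonical config names resolve to the CTRL-specific
# ones, so both spellings see the same value:
_cfg = CTRLConfig()
assert _cfg.hidden_size == _cfg.n_embd == 1280  # 'hidden_size' is mapped to 'n_embd'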
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel ( pl.LightningModule ):
    '''simple docstring'''
    def __init__( self ,model: LongformerModel ) -> None:
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size ,self.num_labels )
    def forward( self ) -> None:
        pass
def convert_longformer_qa_checkpoint_to_pytorch( longformer_model : str , longformer_question_answering_ckpt_path : str , pytorch_dump_folder_path : str ):
    '''simple docstring'''
    longformer = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(longformer )
    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device("""cpu""" ) )
    lightning_model.load_state_dict(ckpt["""state_dict"""] )
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
    print(F'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 345 |
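# --- Added sketch (illustrative; assumes torch is installed) ---------------------
# The conversion above is ultimately a state_dict copy between shape-compatible
# modules; the same pattern in isolation, with hypothetical stand-in layers:
import torch
from torch import nn
_src , _dst = nn.Linear(4 , 2) , nn.Linear(4 , 2)
_dst.load_state_dict(_src.state_dict() )
assert torch.equal(_src.weight , _dst.weight )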
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester :
'''simple docstring'''
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_input_mask=True ,use_token_type_ids=True ,use_labels=True ,vocab_size=99 ,hidden_size=64 ,embedding_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.0_2 ,num_labels=3 ,num_choices=4 ,scope=None ,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return MobileBertConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=False ,initializer_range=self.initializer_range ,)
    def create_and_check_mobilebert_model( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = MobileBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids )
        result = model(input_ids ,token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
    def create_and_check_mobilebert_for_masked_lm( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = MobileBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_mobilebert_for_next_sequence_prediction( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = MobileBertForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=sequence_labels ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
    def create_and_check_mobilebert_for_pretraining( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = MobileBertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels ,next_sentence_label=sequence_labels ,)
        self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
    def create_and_check_mobilebert_for_question_answering( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = MobileBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,start_positions=sequence_labels ,end_positions=sequence_labels ,)
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    def create_and_check_mobilebert_for_sequence_classification( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def create_and_check_mobilebert_for_token_classification( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_mobilebert_for_multiple_choice( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids ,attention_mask=multiple_choice_input_mask ,token_type_ids=multiple_choice_token_type_ids ,labels=choice_labels ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ : List[str] = True
    def _prepare_for_class( self ,inputs_dict ,model_class ,return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict ,model_class ,return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=torch_device )
                inputs_dict["""next_sentence_label"""] = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = MobileBertModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=MobileBertConfig ,hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_mobilebert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
def _long_tensor( tok_lst ):
    '''simple docstring'''
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests ( unittest.TestCase ):
'''simple docstring'''
    @slow
    def test_inference_no_head( self ):
        model = MobileBertModel.from_pretrained("""google/mobilebert-uncased""" ).to(torch_device )
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape ,expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ] ,device=torch_device ,)
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
| 345 | 1 |
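# --- Added illustration (standalone; assumes torch is installed) -----------------
# The ratio-based tolerance used in the integration test above, in isolation:
# values spanning many orders of magnitude are compared via expected / actual ~ 1
# rather than via an absolute difference. Made-up numbers:
import torch
_tolerance = 1e-3  # hypothetical, mirrors TOLERANCE above
_expected = torch.tensor(1.0e8 )
_actual = torch.tensor(1.0e8 * (1 + 1e-5) )
assert (1 - _tolerance) < float(_expected / _actual ) < (1 + _tolerance)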
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeqaSeqTrainer ( SeqaSeqTrainer ):
    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs ):
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate( self , eval_dataset = None , eval_examples=None , ignore_keys = None , metric_key_prefix = "eval" , **gen_kwargs , ) ->Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F'''{metric_key_prefix}_''' ):
                    metrics[F'''{metric_key_prefix}_{key}'''] = metrics.pop(key )
            metrics.update(output.metrics )
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics
    def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix = "test" , **gen_kwargs ):
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output , "predict" )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F'''{metric_key_prefix}_''' ):
                metrics[F'''{metric_key_prefix}_{key}'''] = metrics.pop(key )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics ) | 354 |
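# --- Added sketch (illustrative) -------------------------------------------------
# The max_length/num_beams fallback in evaluate(), reduced to plain dict logic;
# `_args_default` is a hypothetical stand-in for self.args.generation_max_length:
_gen_kwargs_demo = {"num_beams": 4}
_args_default = 128
_max_length = _gen_kwargs_demo["max_length"] if _gen_kwargs_demo.get("max_length") is not None else _args_default
assert (_max_length, _gen_kwargs_demo["num_beams"]) == (128, 4)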
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def dijkstra( grid , source , destination , allow_diagonal , ) -> tuple[float | int, list[tuple[int, int]]]:
    """simple docstring"""
    rows , cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue , visited = [(0, source)], set()
    matrix = np.full((rows, cols) , np.inf )
    matrix[source] = 0
    predecessors = np.empty((rows, cols) , dtype=object )
    predecessors[source] = None
    while queue:
        dist , (x, y) = heappop(queue )
        if (x, y) in visited:
            continue
        visited.add((x, y) )
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y) )
                x , y = predecessors[x, y]
            path.append(source )  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx ) ):
            nx , ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue , (dist + 1, (nx, ny)) )
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
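# --- Added usage sketch (illustrative) --------------------------------------------
# A 3x3 grid of passable cells (1 = walkable); the shortest path from corner to
# corner without diagonals costs 4 steps:
_demo_grid = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]] )
_dist , _path = dijkstra(_demo_grid , (0, 0) , (2, 2) , False )
assert _dist == 4 and _path[0] == (0, 0) and _path[-1] == (2, 2)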
if __name__ == "__main__":
import doctest
doctest.testmod() | 303 | 0 |
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode('utf-8').split()
)
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='') | 190 |
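# --- Added illustration (standalone) -----------------------------------------------
# The directory filter above is an alternation anchored at the path start; the same
# regex logic on hypothetical file names:
import re
_regex = re.compile(r"^(src|tests).*?\.py$")
assert _regex.match("src/foo.py") and not _regex.match("docs/foo.py")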
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
__A : Union[str, Any] = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_UpperCAmelCase , cache_dir=_UpperCAmelCase)
__A : Optional[Any] = [t[-1] for t in os.walk(os.path.join(_UpperCAmelCase , os.listdir(_UpperCAmelCase)[0] , 'snapshots'))]
__A : int = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin') for f in files)
@slow
@require_flax
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_UpperCAmelCase)
__A : Dict = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__A : Optional[Any] = jax.random.PRNGKey(0)
__A : int = 4
__A : Tuple = jax.device_count()
__A : Union[str, Any] = num_samples * [prompt]
__A : Tuple = pipeline.prepare_inputs(_UpperCAmelCase)
# shard inputs and rng
__A : str = replicate(_UpperCAmelCase)
__A : Tuple = jax.random.split(_UpperCAmelCase , _UpperCAmelCase)
__A : Union[str, Any] = shard(_UpperCAmelCase)
__A : Union[str, Any] = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 4.1514745) < 1e-3
assert np.abs(np.abs(_UpperCAmelCase , dtype=np.floataa).sum() - 49947.875) < 5e-1
__A : List[str] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
assert len(_UpperCAmelCase) == num_samples
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_UpperCAmelCase)
__A : List[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__A : Tuple = jax.random.PRNGKey(0)
__A : Any = 50
__A : str = jax.device_count()
__A : Union[str, Any] = num_samples * [prompt]
__A : List[str] = pipeline.prepare_inputs(_UpperCAmelCase)
# shard inputs and rng
__A : Dict = replicate(_UpperCAmelCase)
__A : Optional[Any] = jax.random.split(_UpperCAmelCase , _UpperCAmelCase)
__A : int = shard(_UpperCAmelCase)
__A : Tuple = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.05652401)) < 1e-3
assert np.abs((np.abs(_UpperCAmelCase , dtype=np.floataa).sum() - 2383808.2)) < 5e-1
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_UpperCAmelCase)
__A : List[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__A : str = jax.random.PRNGKey(0)
__A : Any = 50
__A : Optional[int] = jax.device_count()
__A : int = num_samples * [prompt]
__A : Optional[int] = pipeline.prepare_inputs(_UpperCAmelCase)
# shard inputs and rng
__A : Optional[int] = replicate(_UpperCAmelCase)
__A : List[str] = jax.random.split(_UpperCAmelCase , _UpperCAmelCase)
__A : Dict = shard(_UpperCAmelCase)
__A : str = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04003906)) < 1e-3
assert np.abs((np.abs(_UpperCAmelCase , dtype=np.floataa).sum() - 2373516.75)) < 5e-1
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa)
__A : Union[str, Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__A : Any = jax.random.PRNGKey(0)
__A : List[str] = 50
__A : Optional[int] = jax.device_count()
__A : List[Any] = num_samples * [prompt]
__A : List[Any] = pipeline.prepare_inputs(_UpperCAmelCase)
# shard inputs and rng
__A : Union[str, Any] = replicate(_UpperCAmelCase)
__A : Optional[Any] = jax.random.split(_UpperCAmelCase , _UpperCAmelCase)
__A : List[str] = shard(_UpperCAmelCase)
__A : int = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04003906)) < 1e-3
assert np.abs((np.abs(_UpperCAmelCase , dtype=np.floataa).sum() - 2373516.75)) < 5e-1
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = FlaxDDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=_UpperCAmelCase , steps_offset=1 , )
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase , )
__A : Optional[Any] = scheduler.create_state()
__A : Any = scheduler_state
__A : List[str] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__A : Union[str, Any] = jax.random.PRNGKey(0)
__A : Optional[int] = 50
__A : Optional[Any] = jax.device_count()
__A : Any = num_samples * [prompt]
__A : Optional[Any] = pipeline.prepare_inputs(_UpperCAmelCase)
# shard inputs and rng
__A : int = replicate(_UpperCAmelCase)
__A : Any = jax.random.split(_UpperCAmelCase , _UpperCAmelCase)
__A : Tuple = shard(_UpperCAmelCase)
__A : Union[str, Any] = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.045043945)) < 1e-3
assert np.abs((np.abs(_UpperCAmelCase , dtype=np.floataa).sum() - 2347693.5)) < 5e-1
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__A : int = jax.device_count()
__A : List[Any] = num_samples * [prompt]
__A : List[Any] = jax.random.split(jax.random.PRNGKey(0) , _UpperCAmelCase)
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_UpperCAmelCase , )
__A : str = replicate(_UpperCAmelCase)
__A : str = pipeline.prepare_inputs(_UpperCAmelCase)
__A : str = shard(_UpperCAmelCase)
__A : int = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
__A : Any = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_UpperCAmelCase , use_memory_efficient_attention=_UpperCAmelCase , )
__A : Any = replicate(_UpperCAmelCase)
__A : List[Any] = pipeline.prepare_inputs(_UpperCAmelCase)
__A : Optional[Any] = shard(_UpperCAmelCase)
__A : List[Any] = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
__A : List[Any] = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice).max() < 1e-2 | 190 | 1 |
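# --- Added sketch (illustrative; pure numpy) ----------------------------------------
# What flax's `shard` conceptually does to a batch before a pmapped call: split the
# leading axis across devices. The real helper in flax.training.common_utils works
# on whole pytrees; this stand-in handles a single array.
import numpy as np

def _shard_like(batch, num_devices):
    # (num_devices * per_device, ...) -> (num_devices, per_device, ...)
    return batch.reshape((num_devices, -1) + batch.shape[1:])

assert _shard_like(np.zeros((8, 77)), 8).shape == (8, 1, 77)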
"""simple docstring"""
def solution( ) -> str:
    total = 0
    for i in range(1 , 10_01 ):
        total += i**i
    return str(total )[-10:]
if __name__ == "__main__":
print(solution())
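    # Added check (illustrative): the first three terms give 1**1 + 2**2 + 3**3 = 32.
    assert sum(i**i for i in range(1 , 4) ) == 32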
| 359 |
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['speech']
def __init__( self , *_A , **_A ):
'''simple docstring'''
requires_backends(self , ['speech'] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['speech']
def __init__( self , *_A , **_A ):
'''simple docstring'''
requires_backends(self , ['speech'] )
| 118 | 0 |
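# --- Added sketch (illustrative) ------------------------------------------------------
# The dummy-object pattern above in miniature: a placeholder class that defers the
# ImportError until someone actually instantiates it. `_DummySpeechThing` is a
# hypothetical name introduced only for this sketch.
class _DummySpeechThing:
    _backends = ['speech']
    def __init__(self, *args, **kwargs):
        raise ImportError(f"This class requires the {self._backends} backend(s) to be installed.")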
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'abeja/gpt-neox-japanese-2.7b': 2_0_4_8,
}
def load_vocab_and_emoji( vocab_file , emoji_file ):
    with open(emoji_file , "r" , encoding="utf-8" ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as f:
        token = f.readlines()
    token = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[b[0]] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer ( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs , ):
        """simple docstring"""
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                """ model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                """ pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
        self.do_clean_text = do_clean_text
        self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file , emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
    @property
    def vocab_size( self ):
        """simple docstring"""
        return len(self.raw_vocab )
    def get_vocab( self ):
        """simple docstring"""
        return dict(self.raw_vocab , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        """simple docstring"""
        return self.subword_tokenizer.tokenize(text , clean=self.do_clean_text )
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.subword_tokenizer.convert_id_to_token(index )
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        out_string = "".join(tokens ).strip()
        return out_string
    def _build_conversation_input_ids( self , conversation: "Conversation" ):
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
            emoji_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file , "w" , encoding="utf-8" ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        """ Please check that the vocabulary is not corrupted!""" )
                    index = token_index
                writer.write(",".join(token ) + "\n" )
                index += 1
        with open(emoji_file , "w" , encoding="utf-8" ) as writer:
            json.dump(self.emoji , writer )
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer ( object ):
    """simple docstring"""
    def __init__( self , vocab , ids_to_tokens , emoji ):
        """simple docstring"""
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
A__ = re.compile(r"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""" )
A__ = re.compile(r"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""" )
A__ = re.compile(r"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""" )
        self.content_repatter4 = re.compile(
r"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
        self.content_repatter5 = re.compile(
r"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
        self.content_repatter6 = re.compile(
r"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""" )
A__ = """─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"""
A__ = """▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"""
A__ = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks} )
def __len__( self: Optional[Any] ):
"""simple docstring"""
return len(self.ids_to_tokens )
    def clean_text( self , content ):
        """simple docstring"""
        content = self.content_repatter1.sub("""<URL>""" , content )
        content = self.content_repatter2.sub("""<EMAIL>""" , content )
        content = self.content_repatter3.sub("""<TEL>""" , content )
        content = self.content_repatter4.sub("""<DATE>""" , content )
        content = self.content_repatter5.sub("""<DATE>""" , content )
        content = self.content_repatter6.sub("""<PRICE>""" , content )
        content = content.translate(self.content_transa )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""" )
        return content
    def tokenize( self , text , clean=False ):
        """simple docstring"""
        text = text.replace(""" """ , """<SP>""" )
        text = text.replace("""　""" , """<SP>""" )
        text = text.replace("""\r\n""" , """<BR>""" )
        text = text.replace("""\n""" , """<BR>""" )
        text = text.replace("""\r""" , """<BR>""" )
        text = text.replace("""\t""" , """<TAB>""" )
        text = text.replace("""—""" , """ー""" )
        text = text.replace("""−""" , """ー""" )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k , v )
        if clean:
            text = self.clean_text(text )
        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0Xc2a1 and c <= 0Xc2bf)
                    or (c >= 0Xc780 and c <= 0Xc783)
                    or (c >= 0Xcab9 and c <= 0Xcbbf)
                    or (c >= 0Xcc80 and c <= 0Xcda2)
                ):
                    return True
            return False
        def checkuae(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0Xe28080 and c <= 0Xe2b07f:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) , pos + self.maxlen + 1 ) if text[pos] == """<""" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end , pos , -1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _ , wd , e = sorted(candidates , key=lambda x : x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append("""<KIGOU>""" )
                elif checkuae(wd ):
                    result.append("""<U2000U2BFF>""" )
                else:
                    for i in wd.encode("""utf-8""" ):
                        result.append("""<|byte%d|>""" % i )
                pos = end
        return result
    def convert_id_to_token( self , index , breakline="\n" ):
        """simple docstring"""
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(byte_tokens ) > 0:
                words.append(bytearray(byte_tokens ).decode("""utf-8""" , errors="""replace""" ) )
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["""emoji_inv"""][word] )
            elif word == "<SP>":
                words.append(""" """ )
            elif word == "<BR>":
                words.append(breakline )
            elif word == "<TAB>":
                words.append("""\t""" )
            elif word == "<BLOCK>":
                words.append("""▀""" )
            elif word == "<KIGOU>":
                words.append("""ǀ""" )
            elif word == "<U2000U2BFF>":
                words.append("""‖""" )
            else:
                words.append(word )
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode("""utf-8""" , errors="""replace""" ) )
        text = """""".join(words )
        return text
return text
| 335 |
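# --- Added illustration (standalone) -------------------------------------------------
# The <|byteN|> fallback used by the tokenizer above, in isolation: characters with
# no vocabulary entry round-trip through their UTF-8 bytes.
_char = "猫"
_byte_tokens = ["<|byte%d|>" % b for b in _char.encode("utf-8")]
_decoded = bytearray(int(t[6:-2]) for t in _byte_tokens).decode("utf-8", errors="replace")
assert _decoded == _char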
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_pegasus_x'] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 335 | 1 |
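# --- Added sketch (illustrative; PEP 562 module-level __getattr__) --------------------
# The core idea behind `_LazyModule`, reduced to a few lines: resolve an attribute to
# a submodule import only on first access. Hypothetical stand-in, not the real API.
import importlib

def _lazy_get(name, _module=".configuration_pegasus_x", _package="transformers.models.pegasus_x"):
    # defer the (potentially heavy) import until the attribute is actually requested
    return getattr(importlib.import_module(_module, _package), name)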
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel( ksize : int , sigma : int , theta : int , lambd : int , gamma : int , psi : int ):
    '''simple docstring'''
    # prepare kernel
    # the kernel size have to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float32 )
    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 1_8_0 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread('../image_data/lena.jpg')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_aa = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_aa)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
imshow('''Original''', gray)
imshow('''Gabor filter with 20x20 mask and 6 directions''', out)
waitKey(0)
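    # Added check (illustrative): an even ksize is bumped to the next odd value, so
    # requesting 10 yields an 11x11 kernel.
    assert gabor_filter_kernel(10, 8, 0, 10, 0, 0).shape == (11, 11)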
| 358 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats( url : str = "https://www.worldometers.info/coronavirus" ):
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(url ).text ,"""html.parser""" )
    keys = soup.findAll("""h1""" )
    values = soup.findAll("""div""" ,{"""class""": """maincounter-number"""} )
    keys += soup.findAll("""span""" ,{"""class""": """panel-title"""} )
    values += soup.findAll("""div""" ,{"""class""": """number-table-main"""} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys ,values )}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
for key, value in world_covidaa_stats().items():
print(F'''{key}\n{value}\n''')
| 27 | 0 |
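# --- Added illustration (standalone; assumes bs4 is installed) ------------------------
# The selector pattern above on a static HTML snippet, so it runs without any network
# access:
from bs4 import BeautifulSoup
_soup = BeautifulSoup('<div class="maincounter-number"> 42 </div>', "html.parser")
assert _soup.findAll("div", {"class": "maincounter-number"})[0].text.strip() == "42"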
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_lowercase = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class SeqaSeqTrainingArguments ( TrainingArguments ):
'''simple docstring'''
    sortish_sampler: bool = field(default=False , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
    predict_with_generate: bool = field(
        default=False , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
    generation_max_length: Optional[int] = field(
        default=None , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
    generation_num_beams: Optional[int] = field(
        default=None , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
    def to_dict( self ):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v ,GenerationConfig ):
                d[k] = v.to_dict()
        return d | 74 |
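# --- Added sketch (illustrative; plain dataclasses) -------------------------------------
# The to_dict() override above recursively serializes nested config objects; the same
# idea with hypothetical stand-ins:
from dataclasses import dataclass, asdict

@dataclass
class _Inner:
    num_beams: int = 4
    def to_dict(self):
        return asdict(self)

@dataclass
class _Outer:
    generation_config: _Inner = None
    def to_dict(self):
        # recurse into anything that knows how to serialize itself
        return {k: (v.to_dict() if hasattr(v, "to_dict") else v) for k, v in self.__dict__.items()}

assert _Outer(_Inner()).to_dict() == {"generation_config": {"num_beams": 4}}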
"""simple docstring"""
class PrefixSum :
    '''simple docstring'''
    def __init__( self ,array: list[int] ) -> None:
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 ,len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum( self ,start: int ,end: int ) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum( self ,target_sum: int ) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
if __name__ == "__main__":
import doctest
doctest.testmod() | 74 | 1 |
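# --- Added usage sketch (illustrative; uses the names repaired above) -------------------
_ps = PrefixSum([1, 2, 3, 4] )
assert _ps.get_sum(1 , 3 ) == 9        # 2 + 3 + 4
assert _ps.contains_sum(7 ) is True    # 3 + 4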
'''simple docstring'''
def solution( n : int = 4000000 ):
    even_fibs = []
    a , b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a , b = b, a + b
    return sum(even_fibs )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 358 |
'''simple docstring'''
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput ( BaseOutput ):
    """simple docstring"""
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar( num_diffusion_timesteps ,max_beta=0.999 ,alpha_transform_type="cosine" ,):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(F'Unsupported alpha_transform_type: {alpha_transform_type}' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) ,max_beta ) )
    return torch.tensor(betas ,dtype=torch.float32 )
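# --- Added check (illustrative) -----------------------------------------------------
# betas_for_alpha_bar returns one beta per diffusion step, each capped at max_beta:
_demo_betas = betas_for_alpha_bar(10)
assert _demo_betas.shape == (10,) and bool((_demo_betas <= 0.999).all())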
class DDIMInverseScheduler ( SchedulerMixin , ConfigMixin ):
    """simple docstring"""
    order = 1
@register_to_config
    def __init__( self , num_train_timesteps : int = 1000 , beta_start : float = 0.0001 , beta_end : float = 0.02 , beta_schedule : str = "linear" , trained_betas : Optional[Union[np.ndarray, List[float]]] = None , clip_sample : bool = True , set_alpha_to_zero : bool = True , steps_offset : int = 0 , prediction_type : str = "epsilon" , clip_sample_range : float = 1.0 , **kwargs , ):
        if kwargs.get('set_alpha_to_one' , None) is not None:
            deprecation_message = (
                'The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'
            )
            deprecate('set_alpha_to_one' , '1.0.0' , deprecation_message , standard_warn=False)
            set_alpha_to_zero = kwargs['set_alpha_to_one']
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(F'{beta_schedule} is not implemented for {self.__class__}')
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0)
        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps).copy().astype(np.int64))
    def scale_model_input( self : str , sample : torch.FloatTensor , timestep : Optional[int] = None):
        return sample

    def set_timesteps( self : Dict , num_inference_steps : int , device : Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                F'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'
                F' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'
                F' maximal {self.config.num_train_timesteps} timesteps.')
        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0 , num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset
    def step( self : List[Any] , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , eta : float = 0.0 , use_clipped_model_output : bool = False , variance_noise : Optional[torch.FloatTensor] = None , return_dict : bool = True , ):
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps
        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t
        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'
                ' `v_prediction`')
        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range)
        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample)

    def __len__( self : List[Any]):
        return self.config.num_train_timesteps
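# Minimal usage sketch (hedged; the class name above is inferred):
#   scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   out = scheduler.step(model_output, t, sample)   # -> DDIMSchedulerOutput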
| 227 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all BART models at https://huggingface.co/models?filter=bart
_lowerCAmelCase = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
_lowerCAmelCase = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
@lru_cache()
def bytes_to_unicode( ):
    """simple docstring"""
    bs = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs( word ):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
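# Example (hedged): get_pairs(("h", "e", "l", "l", "o"))
#   -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}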
class BartTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names : Any = VOCAB_FILES_NAMES
    pretrained_vocab_files_map : Dict = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names : Any = ['''input_ids''', '''attention_mask''']
    def __init__( self ,vocab_file ,merges_file ,errors="replace" ,bos_token="<s>" ,eos_token="</s>" ,sep_token="</s>" ,cls_token="<s>" ,unk_token="<unk>" ,pad_token="<pad>" ,mask_token="<mask>" ,add_prefix_space=False ,**kwargs ,):
        bos_token = AddedToken(bos_token ,lstrip=False ,rstrip=False ) if isinstance(bos_token ,str ) else bos_token
        eos_token = AddedToken(eos_token ,lstrip=False ,rstrip=False ) if isinstance(eos_token ,str ) else eos_token
        sep_token = AddedToken(sep_token ,lstrip=False ,rstrip=False ) if isinstance(sep_token ,str ) else sep_token
        cls_token = AddedToken(cls_token ,lstrip=False ,rstrip=False ) if isinstance(cls_token ,str ) else cls_token
        unk_token = AddedToken(unk_token ,lstrip=False ,rstrip=False ) if isinstance(unk_token ,str ) else unk_token
        pad_token = AddedToken(pad_token ,lstrip=False ,rstrip=False ) if isinstance(pad_token ,str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token ,lstrip=True ,rstrip=False ) if isinstance(mask_token ,str ) else mask_token
        super().__init__(
            errors=errors ,bos_token=bos_token ,eos_token=eos_token ,unk_token=unk_token ,sep_token=sep_token ,cls_token=cls_token ,pad_token=pad_token ,mask_token=mask_token ,add_prefix_space=add_prefix_space ,**kwargs ,)
        with open(vocab_file ,encoding="""utf-8""" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file ,encoding="""utf-8""" ) as merges_handle:
            bpe_merges = merges_handle.read().split("""\n""" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges ,range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
    @property
    def vocab_size( self ) -> int:
        return len(self.encoder )

    def get_vocab( self ) -> dict:
        return dict(self.encoder ,**self.added_tokens_encoder )
    def bpe( self ,token ) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs ,key=lambda pair : self.bpe_ranks.get(pair ,float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first ,i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = """ """.join(word )
        self.cache[token] = word
        return word
    def _tokenize( self ,text ) -> list:
        bpe_tokens = []
        for token in re.findall(self.pat ,text ):
            token = """""".join(
                self.byte_encoder[b] for b in token.encode("""utf-8""" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(""" """ ) )
        return bpe_tokens

    def _convert_token_to_id( self ,token ) -> int:
        return self.encoder.get(token ,self.encoder.get(self.unk_token ) )

    def _convert_id_to_token( self ,index ) -> str:
        return self.decoder.get(index )

    def convert_tokens_to_string( self ,tokens ) -> str:
        text = """""".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
        return text
    def save_vocabulary( self ,save_directory ,filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        merge_file = os.path.join(
            save_directory ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(vocab_file ,"""w""" ,encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=True ,ensure_ascii=False ) + """\n""" )
        index = 0
        with open(merge_file ,"""w""" ,encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens ) + """\n""" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self ,token_ids_0 ,token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask( self ,token_ids_0 ,token_ids_1 = None ,already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 ,token_ids_1=token_ids_1 ,already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self ,token_ids_0 ,token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    def prepare_for_tokenization( self ,text ,is_split_into_words=False ,**kwargs ) -> tuple:
        add_prefix_space = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = """ """ + text
        return (text, kwargs)
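# Minimal usage sketch (hedged; assumes local vocab.json/merges.txt files):
#   tok = BartTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
#   ids = tok("Hello world")["input_ids"]
#   tok.decode(ids, skip_special_tokens=True)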
| 37 |
import os
import numpy
import onnx
def _is_equal_tensor_proto( a , b ) -> bool:
    '''simple docstring'''
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with( node_proto , name , new_name ) -> None:
    '''simple docstring'''
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with( graph_proto , name , new_name ) -> None:
    '''simple docstring'''
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model( model , model_without_ext , ind_to_replace ) -> None:
    '''simple docstring'''
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers( onnx_file_path ) -> str:
    '''simple docstring'''
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ' , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    new_model_file_name = 'optimized_' + model_file_name
    new_model = os.path.join(model_file_folder , new_model_file_name )
    onnx.save(model , new_model )
    return new_model
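# Usage sketch (hedged): deduplicate shared initializers in an exported ONNX file.
#   optimized_path = remove_dup_initializers("model/encoder.onnx")
#   onnx.checker.check_model(onnx.load(optimized_path))  # optional sanity check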
| 295 | 0 |
import math
def solution( n : int = 100 ) -> int:
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F"{solution() = }")
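# Sanity check (hedged): solution(100) == 25164150, the published Project Euler #6 answer.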
| 370 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType( ExplicitEnum ):
    CHARACTER = """char"""
    BPE = """bpe"""
    WORDPIECE = """wp"""


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor( ProcessorMixin ):
    attributes = ["""image_processor""", """char_tokenizer"""]
    image_processor_class = """ViTImageProcessor"""
    char_tokenizer_class = """MgpstrTokenizer"""

    def __init__( self, image_processor=None, tokenizer=None, **kwargs) -> None:
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained('gpt2')
        self.wp_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
        super().__init__(image_processor, tokenizer)
    def __call__( self, text=None, images=None, return_tensors=None, **kwargs) -> Any:
        """simple docstring"""
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.')
        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def batch_decode( self, sequences) -> dict:
        """simple docstring"""
        char_preds , bpe_preds , wp_preds = sequences
        batch_size = char_preds.size(0)
        char_strs , char_scores = self._decode_helper(char_preds, 'char')
        bpe_strs , bpe_scores = self._decode_helper(bpe_preds, 'bpe')
        wp_strs , wp_scores = self._decode_helper(wp_preds, 'wp')
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])
        out = {}
        out['generated_text'] = final_strs
        out['scores'] = final_scores
        out['char_preds'] = char_strs
        out['bpe_preds'] = bpe_strs
        out['wp_preds'] = wp_strs
        return out
    def _decode_helper( self, pred_logits, format) -> tuple:
        """simple docstring"""
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = '[s]'
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = '#'
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = '[SEP]'
        else:
            raise ValueError(F'''Format {format} is not supported.''')
        dec_strs , conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _ , preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob , _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)
        return dec_strs, conf_scores
    def char_decode( self, sequences) -> list:
        """simple docstring"""
        decode_strs = [seq.replace(' ', '') for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode( self, sequences) -> list:
        """simple docstring"""
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode( self, sequences) -> list:
        """simple docstring"""
        decode_strs = [seq.replace(' ', '') for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
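# Usage sketch (hedged; assumes an MGP-STR checkpoint and a scene-text image):
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   out = processor.batch_decode(model(pixel_values).logits)["generated_text"]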
| 84 | 0 |
from __future__ import annotations
def p_series( nth_term , power ) -> list[str]:
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series: list[str] = []
    for temp in range(int(nth_term ) ):
        series.append(f"""1 / {pow(temp + 1 , int(power ) )}""" if series else '''1''' )
    return series
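# Example (hedged): p_series(5, 2) -> ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']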
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ : Optional[Any] = int(input('Enter the last number (nth term) of the P-Series'))
UpperCAmelCase__ : int = int(input('Enter the power for P-Series'))
print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p')
print(p_series(nth_term, power))
| 121 |
def is_ip_va_address_valid( ip_va_address : str ) -> bool:
    # A valid IPv4 address has exactly four numeric octets, each in the range 0-255.
    octets = [int(i ) for i in ip_va_address.split('''.''' ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= octet <= 255 for octet in octets )
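# Examples (hedged):
#   is_ip_va_address_valid("192.168.0.23") -> True
#   is_ip_va_address_valid("192.256.15.8") -> False  (256 is out of range)
#   is_ip_va_address_valid("12.34.56")     -> False  (only three octets)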
if __name__ == "__main__":
UpperCAmelCase__ : str = input().strip()
UpperCAmelCase__ : Any = 'valid' if is_ip_va_address_valid(ip) else 'invalid'
print(F"""{ip} is a {valid_or_invalid} IP v4 address.""")
| 121 | 1 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
lowerCAmelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name( class_name ):
    """simple docstring"""
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(F".{module_name}" , '''transformers.models''' )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor , '''__name__''' , None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('''transformers''' )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
def get_feature_extractor_config( pretrained_model_name_or_path , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ):
    """simple docstring"""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , FEATURE_EXTRACTOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            '''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
        return {}
    with open(resolved_config_file , encoding='''utf-8''' ) as reader:
        return json.load(reader )
class AutoFeatureExtractor :
    """simple docstring"""

    def __init__( self : Tuple ):
        raise EnvironmentError(
            '''AutoFeatureExtractor is designed to be instantiated '''
            '''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES )
    def from_pretrained( cls : List[str] ,pretrained_model_name_or_path : Optional[Any] ,**kwargs : List[Any] ):
        config = kwargs.pop('''config''' ,None )
        trust_remote_code = kwargs.pop('''trust_remote_code''' ,None )
        kwargs['''_from_auto'''] = True
        config_dict , _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path ,**kwargs )
        feature_extractor_class = config_dict.get('''feature_extractor_type''' ,None )
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get('''auto_map''' ,{} ):
            feature_extractor_auto_map = config_dict['''auto_map''']['''AutoFeatureExtractor''']
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config ,PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path ,**kwargs )
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config ,'''feature_extractor_type''' ,None )
            if hasattr(config ,'''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map['''AutoFeatureExtractor''']
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class )
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config ) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code ,pretrained_model_name_or_path ,has_local_code ,has_remote_code )
        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map ,pretrained_model_name_or_path ,**kwargs )
            _ = kwargs.pop('''code_revision''' ,None )
            if os.path.isdir(pretrained_model_name_or_path ):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict ,**kwargs )
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict ,**kwargs )
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config ) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config )]
            return feature_extractor_class.from_dict(config_dict ,**kwargs )
        raise ValueError(
            F"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            F"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}" )

    @staticmethod
    def register( config_class ,feature_extractor_class ):
        FEATURE_EXTRACTOR_MAPPING.register(config_class ,feature_extractor_class )
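# Usage sketch (hedged):
#   from transformers import AutoFeatureExtractor
#   fe = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")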
| 357 |
'''simple docstring'''
import string
def decrypt( message ):
    """simple docstring"""
    for key in range(len(string.ascii_uppercase ) ):
        translated = ''''''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F"Decryption using Key #{key}: {translated}" )


def main( ):
    """simple docstring"""
    message = input('''Encrypted message: ''' )
    message = message.upper()
    decrypt(message )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
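# Example (hedged): decrypt("KHOOR") prints all 26 candidate plaintexts;
# the line for Key #3 reads "HELLO".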
| 52 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean( input_a : np.ndarray , input_b : np.ndarray ):
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )
def similarity_search( dataset : np.ndarray , value_array : np.ndarray ):
    if dataset.ndim != value_array.ndim:
        msg = (
            '''Wrong input data\'s dimensions... '''
            f'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                '''Wrong input data\'s shape... '''
                f'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError('''Wrong shape''' )
    if dataset.dtype != value_array.dtype:
        msg = (
            '''Input data have different datatype... '''
            f'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def cosine_similarity( input_a : np.ndarray , input_b : np.ndarray ):
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
if __name__ == "__main__":
import doctest
doctest.testmod()
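# Example (hedged):
#   dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
#   value_array = np.array([[0.0, 0.1]])
#   similarity_search(dataset, value_array)  # -> [[[0.0, 0.0], 0.1]]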
| 60 |
"""simple docstring"""
snake_case__ : str = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
snake_case__ : Optional[Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
snake_case__ : Any = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
snake_case__ : Optional[Any] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
snake_case__ : int = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
snake_case__ : Union[str, Any] = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
snake_case__ : List[Any] = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
snake_case__ : Optional[int] = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 60 | 1 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
UpperCAmelCase = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
UpperCAmelCase = {"""facebook/blenderbot_small-90M""": 512}
def get_pairs( word : tuple ) -> set:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
class BlenderbotSmallTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self : int , vocab_file : str , merges_file : str , bos_token : str="__start__" , eos_token : str="__end__" , unk_token : str="__unk__" , pad_token : str="__null__" , **kwargs : Tuple , ) -> None:
        super().__init__(unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs )
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding='''utf-8''' ) as merges_handle:
            merges = merges_handle.read().split('''\n''' )[1:-1]
        merges = [tuple(merge.split() ) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
        self.cache = {}
    @property
    def vocab_size( self : str ) -> int:
        return len(self.encoder )

    def get_vocab( self : int ) -> Dict:
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self : Union[str, Any] , token : str ) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub('''([.,!?()])''' , R''' \1''' , token )
        token = re.sub('''(\')''' , R''' \1 ''' , token )
        token = re.sub(R'''\s{2,}''' , ''' ''' , token )
        if "\n" in token:
            token = token.replace('''\n''' , ''' __newln__''' )
        tokens = token.split(''' ''' )
        words = []
        for token in tokens:
            if not len(token ):
                continue
            token = token.lower()
            word = tuple(token )
            word = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
            pairs = get_pairs(word )
            if not pairs:
                words.append(token )
                continue
            while True:
                bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
                if bigram not in self.bpe_ranks:
                    break
                first , second = bigram
                new_word = []
                i = 0
                while i < len(word ):
                    try:
                        j = word.index(first , i )
                        new_word.extend(word[i:j] )
                        i = j
                    except ValueError:
                        new_word.extend(word[i:] )
                        break
                    if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                        new_word.append(first + second )
                        i += 2
                    else:
                        new_word.append(word[i] )
                        i += 1
                new_word = tuple(new_word )
                word = new_word
                if len(word ) == 1:
                    break
                else:
                    pairs = get_pairs(word )
            word = """@@ """.join(word )
            word = word[:-4]
            self.cache[token] = word
            words.append(word )
        return " ".join(words )
    def _tokenize( self : Any , text : str ) -> List[str]:
        split_tokens = []
        words = re.findall(R'''\S+\n?''' , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(''' ''' ) ) )
        return split_tokens
    def _convert_token_to_id( self : int , token : str ) -> int:
        token = token.lower()
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token( self : Dict , index : int ) -> str:
        return self.decoder.get(index , self.unk_token )

    def convert_tokens_to_string( self : List[str] , tokens : List[str] ) -> str:
        out_string = ''' '''.join(tokens ).replace('''@@ ''' , '''''' ).strip()
        return out_string
    def save_vocabulary( self : str , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
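# Usage sketch (hedged):
#   tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#   ids = tok("hello there")["input_ids"]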
| 364 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
UpperCAmelCase = {
"""<""": operator.lt,
"""<=""": operator.le,
"""==""": operator.eq,
"""!=""": operator.ne,
""">=""": operator.ge,
""">""": operator.gt,
}
def _compare_versions( op : str , got_ver : Optional[str] , want_ver : Optional[str] , requirement : str , pkg : str , hint : str ) -> None:
    if got_ver is None or want_ver is None:
        raise ValueError(
            F'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
            F''' reinstalling {pkg}.''' )
    if not ops[op](version.parse(got_ver ) , version.parse(want_ver ) ):
        raise ImportError(
            F'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )


def require_version( requirement : str , hint : Optional[str] = None ) -> None:
    hint = F'''\n{hint}''' if hint is not None else ''''''
    # non-versioned check
    if re.match(R'''^[\w_\-\d]+$''' , requirement ):
        pkg , op , want_ver = requirement, None, None
    else:
        match = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , requirement )
        if not match:
            raise ValueError(
                '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'''
                F''' got {requirement}''' )
        pkg , want_full = match[0]
        want_range = want_full.split(''',''' )  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(R'''^([\s!=<>]{1,2})(.+)''' , w )
            if not match:
                raise ValueError(
                    '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'''
                    F''' but got {requirement}''' )
            op , want_ver = match[0]
            if op not in ops:
                raise ValueError(F'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
            wanted[op] = want_ver
    # special case
    if pkg == "python":
        got_ver = '''.'''.join([str(x ) for x in sys.version_info[:3]] )
        for op, want_ver in wanted.items():
            _compare_versions(op , got_ver , want_ver , requirement , pkg , hint )
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg )
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            F'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op , got_ver , want_ver , requirement , pkg , hint )


def require_version_core( requirement : Tuple ) -> Any:
    hint = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
    return require_version(requirement , hint )
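# Usage sketch (hedged):
#   require_version("tokenizers>=0.11.1,!=0.11.3,<0.13")
#   require_version_core("datasets>=1.8.0")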
| 54 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 5_0_0_0_0
SMALL_TEST = 5_0_0_0
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def read( dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch( dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted( dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch( dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {'''length''': SMALL_TEST}),
        (read, {'''length''': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_0}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_0_0}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_0_0_0}),
        (read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
        (read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}),
        (read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}),
        (read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}),
        (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1_0}),
        (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1_0_0_0}),
    ]
    functions_shuffled = [
        (read, {'''length''': SMALL_TEST}),
        (read, {'''length''': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_0}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_0_0}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_0_0_0}),
        (read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
        (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1_0}),
        (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1_0_0_0}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print('''generating dataset''')
        features = datasets.Features(
            {'''list''': datasets.Sequence(datasets.Value('''float32''')), '''numbers''': datasets.Value('''float32''')})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, '''dataset.arrow'''), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={'''list''': (1_0_0,)}, )
        print('''first set of iterations''')
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + ''' ''' + ''' '''.join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print('''shuffling dataset''')
        dataset = dataset.shuffle()
        print('''Second set of iterations (after shuffling''')
        for func, kwargs in functions_shuffled:
            print('''shuffled ''', func.__name__, str(kwargs))
            times['''shuffled ''' + func.__name__ + ''' ''' + ''' '''.join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs)
    with open(RESULTS_FILE_PATH, '''wb''') as f:
        f.write(json.dumps(times).encode('''utf-8'''))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 174 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card( model_card_dir, src_lang, tgt_lang):
    texts = {
        '''en''': '''Machine learning is great, isn\'t it?''',
        '''ru''': '''Машинное обучение - это здорово, не так ли?''',
        '''de''': '''Maschinelles Lernen ist großartig, oder?''',
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        '''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
        '''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
        '''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
        '''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
    }
    pair = F"""{src_lang}-{tgt_lang}"""
    readme = F"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, '''README.md''')
    print(F"""Generating {path}""")
    with open(path, '''w''', encoding='''utf-8''') as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("""-""")
    model_card_dir = model_cards_dir / """facebook""" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 174 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
__UpperCAmelCase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__UpperCAmelCase = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 5_12,
'google/electra-base-generator': 5_12,
'google/electra-large-generator': 5_12,
'google/electra-small-discriminator': 5_12,
'google/electra-base-discriminator': 5_12,
'google/electra-large-discriminator': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class __a ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        # keep the backend normalizer in sync with the arguments passed in
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
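# A minimal usage sketch of the tokenizer above (requires network access to the
# Hugging Face Hub; it uses the library's released ElectraTokenizerFast, which
# the class here mirrors, and a checkpoint name taken from the maps above).
# It exercises the special-token and token-type-id logic: a sentence pair is
# encoded as [CLS] A [SEP] B [SEP], with token_type_ids 0 for the first segment
# and 1 for the second.
from transformers import ElectraTokenizerFast

_tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
_enc = _tok("first sentence", "second sentence")
print(_enc["input_ids"])
print(_enc["token_type_ids"])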
| 28 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger('transformers.models.speecht5')
def load_weights( checkpoint , hf_model , config ):
    '''simple docstring'''
    # the left-hand targets follow the SpeechTaHifiGan module layout
    # (conv_pre, upsampler, resblocks, conv_post); weight norm must be active
    # while the weight_g/weight_v tensors are copied, and is removed afterwards
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint["""input_conv.weight_g"""]
    hf_model.conv_pre.weight_v.data = checkpoint["""input_conv.weight_v"""]
    hf_model.conv_pre.bias.data = checkpoint["""input_conv.bias"""]
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[f'upsamples.{i}.1.weight_g']
        hf_model.upsampler[i].weight_v.data = checkpoint[f'upsamples.{i}.1.weight_v']
        hf_model.upsampler[i].bias.data = checkpoint[f'upsamples.{i}.1.bias']
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_g']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_v']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f'blocks.{i}.convs1.{j}.1.bias']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_g']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_v']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f'blocks.{i}.convs2.{j}.1.bias']
    hf_model.conv_post.weight_g.data = checkpoint["""output_conv.1.weight_g"""]
    hf_model.conv_post.weight_v.data = checkpoint["""output_conv.1.weight_v"""]
    hf_model.conv_post.bias.data = checkpoint["""output_conv.1.bias"""]
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    '''simple docstring'''
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint["""model"""]["""generator"""] , model , config )
    # stats.npy stores the mean and scale used to normalize the log-mel input
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("""Pushing to the hub...""" )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
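# Hypothetical invocation (all paths are placeholders): the fairseq-style
# generator checkpoint plus its stats.npy (mean/scale of the log-mel features)
# are converted into a Hugging Face SpeechT5 HiFi-GAN vocoder folder.
#
#   python convert_hifigan.py \
#       --checkpoint_path generator.ckpt \
#       --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan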
| 28 | 1 |
"""simple docstring"""
UNIT_SYMBOL = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def SCREAMING_SNAKE_CASE ( value : float , from_type : str , to_type : str ) -> float:
    from_sanitized = from_type.lower().strip("""s""" )
    to_sanitized = to_type.lower().strip("""s""" )
    from_sanitized = UNIT_SYMBOL.get(from_sanitized , from_sanitized )
    to_sanitized = UNIT_SYMBOL.get(to_sanitized , to_sanitized )
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION )}"
        )
        raise ValueError(msg )
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION )}"
        )
        raise ValueError(msg )
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10 , exponent )
if __name__ == "__main__":
from doctest import testmod
testmod()
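# Worked example of the exponent arithmetic above: "kilometer" maps to "km"
# (exponent 3) and "meter" to "m" (exponent 0), so the factor is 10 ** (3 - 0);
# converting in the other direction negates the exponent difference:
#
#   >>> SCREAMING_SNAKE_CASE(4, "kilometer", "meter")
#   4000.0
#   >>> SCREAMING_SNAKE_CASE(1, "meter", "km")
#   0.001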
| 44 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self ) -> Optional[Any]:
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding="""utf-8""" ,check=__UpperCAmelCase ,)
assert hasattr(self ,"""env""" )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]:
lowerCAmelCase__ : Optional[int] = F"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
lowerCAmelCase__ : Any = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=__UpperCAmelCase ,instance_count=__UpperCAmelCase ,instance_type=self.instance_type ,debugger_hook_config=__UpperCAmelCase ,hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,distribution=__UpperCAmelCase ,py_version="""py36""" ,)
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]:
TrainingJobAnalytics(__UpperCAmelCase ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Any:
# create estimator
lowerCAmelCase__ : List[Any] = self.create_estimator(__UpperCAmelCase )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase__ : Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase__ : int = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
lowerCAmelCase__ : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCAmelCase__ : List[str] = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" ,99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" ,"""w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} ,__UpperCAmelCase )
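# A sketch of the switch exercised above (a reading of the test, not part of
# it): for run_glue.py the estimator enables SageMaker's data-parallel backend
# through the `distribution` argument, while run_ddp.py gets distribution=None
# because it launches torch.distributed on its own.
#
#   distribution = {"smdistributed": {"dataparallel": {"enabled": True}}}
#   # HuggingFace(..., distribution=distribution, instance_type="ml.p3.16xlarge")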
| 37 | 0 |
import functools
def lowerCAmelCase_ ( days : list , costs : list ) -> int:
    '''simple docstring'''
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError("The parameter days should be a list of integers" )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError("The parameter costs should be a list of three integers" )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError("All days elements should be greater than 0" )
    if max(days ) >= 366:
        raise ValueError("All days elements should be less than 366" )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index : int ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
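# Worked example (the classic "minimum cost for tickets" instance): a 7-day
# pass covers travel days 4-8 and 1-day passes cover days 1 and 20, so the
# minimum cost is 2 + 7 + 2 = 11:
#
#   >>> lowerCAmelCase_([1, 4, 6, 7, 8, 20], [2, 7, 15])
#   11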
| 364 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None
    @staticmethod
    def is_available():
        raise NotImplementedError
    def run(self , trainer , n_trials , direction , **kwargs):
        raise NotImplementedError
    def default_hp_space(self , trial):
        raise NotImplementedError
    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
    @classmethod
    def pip_install(cls):
        return f'''`pip install {cls.pip_package or cls.name}`'''
class OptunaBackend(HyperParamSearchBackendBase):
    name = 'optuna'
    @staticmethod
    def is_available():
        return is_optuna_available()
    def run(self , trainer , n_trials , direction , **kwargs):
        return run_hp_search_optuna(trainer , n_trials , direction , **kwargs)
    def default_hp_space(self , trial):
        return default_hp_space_optuna(trial)
class RayTuneBackend(HyperParamSearchBackendBase):
    name = 'ray'
    pip_package = '\'ray[tune]\''
    @staticmethod
    def is_available():
        return is_ray_available()
    def run(self , trainer , n_trials , direction , **kwargs):
        return run_hp_search_ray(trainer , n_trials , direction , **kwargs)
    def default_hp_space(self , trial):
        return default_hp_space_ray(trial)
class SigOptBackend(HyperParamSearchBackendBase):
    name = 'sigopt'
    @staticmethod
    def is_available():
        return is_sigopt_available()
    def run(self , trainer , n_trials , direction , **kwargs):
        return run_hp_search_sigopt(trainer , n_trials , direction , **kwargs)
    def default_hp_space(self , trial):
        return default_hp_space_sigopt(trial)
class WandbBackend(HyperParamSearchBackendBase):
    name = 'wandb'
    @staticmethod
    def is_available():
        return is_wandb_available()
    def run(self , trainer , n_trials , direction , **kwargs):
        return run_hp_search_wandb(trainer , n_trials , direction , **kwargs)
    def default_hp_space(self , trial):
        return default_hp_space_wandb(trial)
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend() -> str:
    '''simple docstring'''
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends ) > 0:
        name = available_backends[0].name
        if len(available_backends ) > 1:
            logger.info(
                f'''{len(available_backends )} hyperparameter search backends available. Using {name} as the default.''' )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f''' - To install {backend.name} run {backend.pip_install()}'''
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
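# A minimal usage sketch of the registry above (a sketch, not part of the
# upstream module): pick the default backend, verify its package is installed,
# then ask it for a default search space. For Optuna, the `trial` object would
# normally come from an Optuna study rather than be None.
#
#   name = default_hp_search_backend()                       # e.g. "optuna"
#   backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(name)]()
#   backend.ensure_available()    # raises with a pip hint if not installed
#   space = backend.default_hp_space(trial=None)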
| 41 | 0 |
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export( model , model_args , output_path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ):
    '''simple docstring'''
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = False ):
'''simple docstring'''
lowercase = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
lowercase = '''cuda'''
elif fpaa and not torch.cuda.is_available():
raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' )
else:
lowercase = '''cpu'''
lowercase = StableDiffusionPipeline.from_pretrained(lowerCAmelCase__ , torch_dtype=lowerCAmelCase__ ).to(lowerCAmelCase__ )
lowercase = Path(lowerCAmelCase__ )
# TEXT ENCODER
lowercase = pipeline.text_encoder.config.max_position_embeddings
lowercase = pipeline.text_encoder.config.hidden_size
lowercase = pipeline.tokenizer(
'''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=lowerCAmelCase__ , return_tensors='''pt''' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=lowerCAmelCase__ , dtype=torch.intaa )) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''sequence'''},
} , opset=lowerCAmelCase__ , )
del pipeline.text_encoder
# UNET
lowercase = pipeline.unet.config.in_channels
lowercase = pipeline.unet.config.sample_size
lowercase = output_path / '''unet''' / '''model.onnx'''
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
torch.randn(2 ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
torch.randn(2 , lowerCAmelCase__ , lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
False,
) , output_path=lowerCAmelCase__ , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={
'''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
'''timestep''': {0: '''batch'''},
'''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''},
} , opset=lowerCAmelCase__ , use_external_data_format=lowerCAmelCase__ , )
lowercase = str(unet_path.absolute().as_posix() )
lowercase = os.path.dirname(lowerCAmelCase__ )
lowercase = onnx.load(lowerCAmelCase__ )
# clean up existing tensor files
shutil.rmtree(lowerCAmelCase__ )
os.mkdir(lowerCAmelCase__ )
# collate external tensor files into one
onnx.save_model(
lowerCAmelCase__ , lowerCAmelCase__ , save_as_external_data=lowerCAmelCase__ , all_tensors_to_one_file=lowerCAmelCase__ , location='''weights.pb''' , convert_attribute=lowerCAmelCase__ , )
del pipeline.unet
# VAE ENCODER
lowercase = pipeline.vae
lowercase = vae_encoder.config.in_channels
lowercase = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
lowercase = lambda lowerCAmelCase__ , lowerCAmelCase__ : vae_encoder.encode(lowerCAmelCase__ , lowerCAmelCase__ )[0].sample()
onnx_export(
lowerCAmelCase__ , model_args=(
torch.randn(1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
False,
) , output_path=output_path / '''vae_encoder''' / '''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={
'''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=lowerCAmelCase__ , )
# VAE DECODER
lowercase = pipeline.vae
lowercase = vae_decoder.config.latent_channels
lowercase = vae_decoder.config.out_channels
# forward only through the decoder part
    lowercase = vae_decoder.decode
onnx_export(
lowerCAmelCase__ , model_args=(
torch.randn(1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
False,
) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
'''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=lowerCAmelCase__ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
lowercase = pipeline.safety_checker
lowercase = safety_checker.config.vision_config.num_channels
lowercase = safety_checker.config.vision_config.image_size
lowercase = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
torch.randn(1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={
'''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
'''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''},
} , opset=lowerCAmelCase__ , )
del pipeline.safety_checker
lowercase = OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''' )
lowercase = pipeline.feature_extractor
else:
lowercase = None
lowercase = None
lowercase = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''' ) , scheduler=pipeline.scheduler , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(lowerCAmelCase__ )
print('''ONNX pipeline saved to''' , lowerCAmelCase__ )
del pipeline
del onnx_pipeline
lowercase = OnnxStableDiffusionPipeline.from_pretrained(lowerCAmelCase__ , provider='''CPUExecutionProvider''' )
print('''ONNX pipeline is loadable''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
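# Hypothetical invocation and smoke test (the script name, model id, and
# output path are placeholders): convert a diffusers checkpoint, then reload
# it with the ONNX pipeline exactly as the loadability check above does.
#
#   python convert_stable_diffusion_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 --output_path ./sd_onnx --opset 14
#
#   pipe = OnnxStableDiffusionPipeline.from_pretrained("./sd_onnx", provider="CPUExecutionProvider")
#   image = pipe("an astronaut riding a horse").images[0]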
| 101 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase ( DiffusionPipeline ):
def __init__( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,):
super().__init__()
self.register_modules(
vae=A__ ,text_encoder=A__ ,tokenizer=A__ ,unet=A__ ,scheduler=A__ ,safety_checker=A__ ,feature_extractor=A__ ,)
def A__ ( self ,A__ = "auto"):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A__)
def A__ ( self):
self.enable_attention_slicing(A__)
@torch.no_grad()
def __call__( self ,A__ ,A__ = 5_1_2 ,A__ = 5_1_2 ,A__ = 5_0 ,A__ = 7.5 ,A__ = None ,A__ = 1 ,A__ = 0.0 ,A__ = None ,A__ = None ,A__ = "pil" ,A__ = True ,A__ = None ,A__ = 1 ,A__ = None ,**A__ ,):
if isinstance(A__ ,A__):
lowercase = 1
elif isinstance(A__ ,A__):
lowercase = len(A__)
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(A__)}')
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.')
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A__ ,A__) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(A__)}.')
# get prompt text embeddings
lowercase = self.tokenizer(
A__ ,padding='''max_length''' ,max_length=self.tokenizer.model_max_length ,return_tensors='''pt''' ,)
lowercase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f' {self.tokenizer.model_max_length} tokens: {removed_text}')
lowercase = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowercase = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase , lowercase , lowercase = text_embeddings.shape
lowercase = text_embeddings.repeat(1 ,A__ ,1)
lowercase = text_embeddings.view(bs_embed * num_images_per_prompt ,A__ ,-1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase = 42
if negative_prompt is None:
lowercase = ['''''']
elif type(A__) is not type(A__):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(A__)} !='
f' {type(A__)}.')
elif isinstance(A__ ,A__):
lowercase = [negative_prompt]
elif batch_size != len(A__):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(A__)}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
''' the batch size of `prompt`.''')
else:
lowercase = negative_prompt
lowercase = text_input_ids.shape[-1]
lowercase = self.tokenizer(
A__ ,padding='''max_length''' ,max_length=A__ ,truncation=A__ ,return_tensors='''pt''' ,)
lowercase = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase = uncond_embeddings.shape[1]
lowercase = uncond_embeddings.repeat(A__ ,A__ ,1)
lowercase = uncond_embeddings.view(batch_size * num_images_per_prompt ,A__ ,-1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 6_4, 6_4)
lowercase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase = torch.randn(
A__ ,generator=A__ ,device='''cpu''' ,dtype=A__).to(self.device)
lowercase = torch.randn(A__ ,generator=A__ ,device='''cpu''' ,dtype=A__).to(
self.device)
else:
lowercase = torch.randn(
A__ ,generator=A__ ,device=self.device ,dtype=A__)
lowercase = torch.randn(A__ ,generator=A__ ,device=self.device ,dtype=A__)
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}')
lowercase = latents_reference.to(self.device)
lowercase = latents.to(self.device)
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
lowercase = (latents_shape[3] - latents_shape_reference[3]) // 2
lowercase = (latents_shape[2] - latents_shape_reference[2]) // 2
lowercase = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowercase = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowercase = 0 if dx < 0 else dx
lowercase = 0 if dy < 0 else dy
lowercase = max(-dx ,0)
lowercase = max(-dy ,0)
# import pdb
# pdb.set_trace()
lowercase = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(A__)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowercase = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
lowercase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
lowercase = {}
if accepts_eta:
lowercase = eta
for i, t in enumerate(self.progress_bar(A__)):
# expand the latents if we are doing classifier free guidance
lowercase = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
lowercase = self.scheduler.scale_model_input(A__ ,A__)
# predict the noise residual
lowercase = self.unet(A__ ,A__ ,encoder_hidden_states=A__).sample
# perform guidance
if do_classifier_free_guidance:
lowercase , lowercase = noise_pred.chunk(2)
lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase = self.scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A__ ,A__ ,A__)
lowercase = 1 / 0.18215 * latents
lowercase = self.vae.decode(A__).sample
lowercase = (image / 2 + 0.5).clamp(0 ,1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase = image.cpu().permute(0 ,2 ,3 ,1).float().numpy()
if self.safety_checker is not None:
lowercase = self.feature_extractor(self.numpy_to_pil(A__) ,return_tensors='''pt''').to(
self.device)
lowercase , lowercase = self.safety_checker(
images=A__ ,clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype))
else:
lowercase = None
if output_type == "pil":
lowercase = self.numpy_to_pil(A__)
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=A__ ,nsfw_content_detected=A__)
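# A minimal loading sketch (the community-pipeline name is an assumption: the
# class above appears to correspond to diffusers' "seed resize" community
# pipeline, which aligns reference 64x64 latents with the target-size latents
# so that one seed produces similar compositions across output sizes):
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", custom_pipeline="seed_resize_stable_diffusion"
#   )
#   image = pipe("a fantasy landscape", height=768, width=768).images[0]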
| 101 | 1 |
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def lowerCamelCase () -> int:
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''')
    args = parser.parse_args()
    return args.f
class __magic_name__ ( TestCasePlus ):
def __snake_case ( self : List[Any] ):
'''simple docstring'''
lowercase :Dict = logging.StreamHandler(sys.stdout )
logger.addHandler(snake_case__ )
def __snake_case ( self : int , snake_case__ : List[str] ):
'''simple docstring'''
lowercase :str = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , '''run_glue_deebert.py''' )
with patch.object(snake_case__ , '''argv''' , snake_case__ ):
lowercase :Optional[Any] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(snake_case__ , 0.6_66 )
@slow
@require_torch_non_multi_gpu
def __snake_case ( self : Tuple ):
'''simple docstring'''
lowercase :Optional[Any] = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
'''.split()
self.run_and_check(snake_case__ )
lowercase :Any = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
self.run_and_check(snake_case__ )
lowercase :Union[str, Any] = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
self.run_and_check(snake_case__ )
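# Reading of the flags exercised above (an interpretation, not from the file):
# --eval_after_first_stage evaluates once the backbone classifier is trained
# but before the highway (early-exit) heads are, --eval_each_highway reports
# accuracy at every internal exit, and --early_exit_entropy 0.1 lets inference
# stop at the first exit whose prediction entropy drops below 0.1, trading
# accuracy for speed.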
| 172 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase = logging.get_logger(__name__)
# TODO: upload to AWS
UpperCAmelCase = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class __magic_name__ ( PretrainedConfig ):
    model_type = "retribert"
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=8 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , share_encoders=True , projection_dim=1_2_8 , pad_token_id=0 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
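# A minimal instantiation sketch of the config above: the defaults mirror a
# BERT-base-sized encoder (768 hidden, 8 layers, 12 heads) with a shared
# encoder pair and a 128-dimensional projection head.
#
#   config = __magic_name__()       # class name is obfuscated in this copy
#   assert config.projection_dim == 128 and config.share_encoders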
| 172 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
a : Optional[int] = '''0.12''' # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor ( shape , vocab_size , rng=None ):
    '''simple docstring'''
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims ):
        values.append(rng.randint(0 , vocab_size - 1 ) )
    return np.array(values , dtype=jnp.int32 ).reshape(shape )
def random_attention_mask ( shape , rng=None ):
    '''simple docstring'''
    attn_mask = ids_tensor(shape , vocab_size=2 , rng=rng )
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = None
SCREAMING_SNAKE_CASE__ : List[Any] = ()
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
UpperCAmelCase : Tuple = 2
UpperCAmelCase : Optional[int] = inputs["input_ids"].shape[-1] // 2
UpperCAmelCase : Optional[int] = inputs["input_ids"][:max_batch_size, :sequence_length]
UpperCAmelCase : Any = jnp.ones_like(_lowercase )
UpperCAmelCase : List[str] = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
UpperCAmelCase : Tuple = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
UpperCAmelCase : Optional[int] = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = self._get_input_ids_and_config()
UpperCAmelCase : str = False
UpperCAmelCase : Dict = max_length
UpperCAmelCase : Any = 0
for model_class in self.all_generative_model_classes:
UpperCAmelCase : Union[str, Any] = model_class(_lowercase )
UpperCAmelCase : str = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCAmelCase : Optional[Any] = getattr(_lowercase , _lowercase )
UpperCAmelCase : str = pt_model_class(_lowercase ).eval()
UpperCAmelCase : Optional[Any] = load_flax_weights_in_pytorch_model(_lowercase , flax_model.params )
UpperCAmelCase : List[str] = flax_model.generate(_lowercase ).sequences
UpperCAmelCase : Dict = pt_model.generate(torch.tensor(_lowercase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
UpperCAmelCase : List[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = self._get_input_ids_and_config()
UpperCAmelCase : List[Any] = False
UpperCAmelCase : Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase : List[Any] = model_class(_lowercase )
UpperCAmelCase : Optional[int] = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
UpperCAmelCase : str = jit(model.generate )
UpperCAmelCase : Tuple = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = self._get_input_ids_and_config()
UpperCAmelCase : Dict = True
UpperCAmelCase : Tuple = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase : Union[str, Any] = model_class(_lowercase )
UpperCAmelCase : Any = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
UpperCAmelCase : str = jit(model.generate )
UpperCAmelCase : List[Any] = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self._get_input_ids_and_config()
UpperCAmelCase : List[Any] = False
UpperCAmelCase : int = max_length
UpperCAmelCase : Union[str, Any] = 2
for model_class in self.all_generative_model_classes:
UpperCAmelCase : List[Any] = model_class(_lowercase )
UpperCAmelCase : Any = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
UpperCAmelCase : str = jit(model.generate )
UpperCAmelCase : Optional[Any] = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self._get_input_ids_and_config()
UpperCAmelCase : Union[str, Any] = False
UpperCAmelCase : List[str] = max_length
UpperCAmelCase : Optional[Any] = 2
UpperCAmelCase : Optional[Any] = 2
for model_class in self.all_generative_model_classes:
UpperCAmelCase : Tuple = model_class(_lowercase )
UpperCAmelCase : List[Any] = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = self._get_input_ids_and_config()
UpperCAmelCase : List[Any] = True
UpperCAmelCase : List[Any] = max_length
UpperCAmelCase : List[str] = 0.8
UpperCAmelCase : List[str] = 1_0
UpperCAmelCase : Any = 0.3
UpperCAmelCase : Union[str, Any] = 1
UpperCAmelCase : Union[str, Any] = 8
UpperCAmelCase : List[Any] = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase : List[str] = model_class(_lowercase )
UpperCAmelCase : Optional[Any] = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
UpperCAmelCase : int = jit(model.generate )
UpperCAmelCase : Tuple = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = self._get_input_ids_and_config()
UpperCAmelCase : Optional[Any] = max_length
UpperCAmelCase : Dict = 1
UpperCAmelCase : Optional[Any] = 8
UpperCAmelCase : List[Any] = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase : Optional[int] = model_class(_lowercase )
UpperCAmelCase : Dict = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
UpperCAmelCase : Dict = jit(model.generate )
UpperCAmelCase : Union[str, Any] = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = self._get_input_ids_and_config()
UpperCAmelCase : Optional[int] = max_length
UpperCAmelCase : Tuple = 2
UpperCAmelCase : int = 1
UpperCAmelCase : str = 8
UpperCAmelCase : Any = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase : Union[str, Any] = model_class(_lowercase )
UpperCAmelCase : List[str] = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
UpperCAmelCase : List[Any] = jit(model.generate )
UpperCAmelCase : Union[str, Any] = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase : int = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase : str = False
UpperCAmelCase : Dict = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase : List[str] = model_class(_lowercase )
UpperCAmelCase : Any = model.generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
UpperCAmelCase : Dict = jit(model.generate )
UpperCAmelCase : str = jit_generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase : Optional[Any] = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase : List[Any] = True
UpperCAmelCase : Dict = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase : List[Any] = model_class(_lowercase )
UpperCAmelCase : Any = model.generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
UpperCAmelCase : Dict = jit(model.generate )
UpperCAmelCase : Union[str, Any] = jit_generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase : Dict = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase : Dict = 2
UpperCAmelCase : Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase : Dict = model_class(_lowercase )
UpperCAmelCase : Dict = model.generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
UpperCAmelCase : Optional[Any] = jit(model.generate )
UpperCAmelCase : int = jit_generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
UpperCAmelCase : Any = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
UpperCAmelCase : Optional[int] = "Hello world"
UpperCAmelCase : Optional[int] = tokenizer(_lowercase , return_tensors="np" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(_lowercase , "do_samples" ):
model.generate(_lowercase , do_samples=_lowercase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(_lowercase , "foo" ):
UpperCAmelCase : Optional[Any] = {"foo": "bar"}
model.generate(_lowercase , **_lowercase )
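# A standalone sketch of the eager-vs-jit parity pattern the tests above
# assert, using the same tiny checkpoints the slow test loads (greedy decoding
# is the default, so both calls should agree token for token):
#
#   from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
#   tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
#   model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
#   ids = tok("Hello world", return_tensors="np").input_ids
#   eager = model.generate(ids).sequences
#   jitted = jit(model.generate)(ids).sequences
#   assert eager.tolist() == jitted.tolist()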
| 311 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __snake_case ( PipelineTool ):
lowerCAmelCase_ = "Salesforce/blip-image-captioning-base"
lowerCAmelCase_ = (
"This is a tool that generates a description of an image. It takes an input named `image` which should be the "
"image to caption, and returns a text that contains the description in English."
)
lowerCAmelCase_ = "image_captioner"
lowerCAmelCase_ = AutoModelForVisionaSeq
lowerCAmelCase_ = ["image"]
lowerCAmelCase_ = ["text"]
def __init__( self : List[Any] , *_lowercase : Optional[int] , **_lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ["""vision"""] )
super().__init__(*_lowercase , **_lowercase )
def __a ( self : Tuple , _lowercase : "Image" ):
"""simple docstring"""
return self.pre_processor(images=_lowercase , return_tensors="""pt""" )
def __a ( self : Union[str, Any] , _lowercase : Optional[int] ):
"""simple docstring"""
return self.model.generate(**_lowercase )
def __a ( self : int , _lowercase : Any ):
"""simple docstring"""
return self.pre_processor.batch_decode(_lowercase , skip_special_tokens=_lowercase )[0].strip()
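# A minimal usage sketch (requires the vision extras and a local image; the
# path is a placeholder). PipelineTool instances are callable, and the call
# chains the three hooks defined above: encode -> forward -> decode.
#
#   from PIL import Image
#   tool = __snake_case()             # the captioner class defined above
#   print(tool(Image.open("photo.jpg")))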
| 219 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class a_ ( unittest.TestCase ):
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = tempfile.mkdtemp()
UpperCamelCase = BlipImageProcessor()
UpperCamelCase = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
UpperCamelCase = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
UpperCamelCase = InstructBlipProcessor(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def A__ ( self , **_SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).tokenizer
def A__ ( self , **_SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).image_processor
def A__ ( self , **_SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).qformer_tokenizer
def A__ ( self ) -> Any:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCamelCase = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
UpperCamelCase = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
self.assertIsInstance(processor.qformer_tokenizer , __lowerCamelCase )
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_qformer_tokenizer()
UpperCamelCase = InstructBlipProcessor(
tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase , qformer_tokenizer=__lowerCamelCase )
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = image_processor(__lowerCamelCase , return_tensors="""np""" )
UpperCamelCase = processor(images=__lowerCamelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_qformer_tokenizer()
UpperCamelCase = InstructBlipProcessor(
tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase , qformer_tokenizer=__lowerCamelCase )
UpperCamelCase = '''lower newer'''
UpperCamelCase = processor(text=__lowerCamelCase )
UpperCamelCase = tokenizer(__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
UpperCamelCase = qformer_tokenizer(__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_qformer_tokenizer()
UpperCamelCase = InstructBlipProcessor(
tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase , qformer_tokenizer=__lowerCamelCase )
UpperCamelCase = '''lower newer'''
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
        with pytest.raises(ValueError):
processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
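

# A minimal, self-contained sketch (not part of the original test suite) of the key
# layout the tests above assert: InstructBlip's processor merges the main tokenizer,
# the Q-Former tokenizer, and the image processor into one dict, with the Q-Former
# entries prefixed by "qformer_". The helper name is an illustrative assumption.
def merge_processor_outputs(text_enc, qformer_enc, image_enc):
    merged = dict(text_enc)
    merged.update({f"qformer_{key}": value for key, value in qformer_enc.items()})
    merged.update(image_enc)
    return merged


assert sorted(
    merge_processor_outputs(
        {"input_ids": [1], "attention_mask": [1]},
        {"input_ids": [2], "attention_mask": [1]},
        {"pixel_values": [[0.0]]},
    )
) == ["attention_mask", "input_ids", "pixel_values", "qformer_attention_mask", "qformer_input_ids"]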
| 356 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16,
        hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2,
        mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True,
        initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True,
        type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim])

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim])
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
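

# Worked example (illustrative) of the sequence-length check in create_and_check_model:
# with image_size=32, patch_size=2 and depths=[1, 2, 1], the patch grid starts at
# (32 // 2) ** 2 == 256 tokens; each of the len(depths) - 1 == 2 later stages
# downsamples by a factor of 4, so the final length is 256 // 4**2 == 16 tokens,
# each of width embed_dim * 2**2 == 64.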
| 183 | 0 |
import socket


def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
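

# A minimal matching server sketch (an assumption about the peer, not part of the
# original script): it listens on the same port, reads the greeting, then streams
# a file back in 1024-byte chunks; the client above writes them to Received_file.
# The filename is a hypothetical placeholder.
def serve(filename: str = "some_file.txt") -> None:
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind((socket.gethostname(), 12312))
    srv.listen(1)
    conn, _addr = srv.accept()
    print(conn.recv(1024))  # b"Hello server!"
    with open(filename, "rb") as in_file:
        while True:
            chunk = in_file.read(1024)
            if not chunk:
                break
            conn.send(chunk)
    conn.close()
    srv.close()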
| 73 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )
        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
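

if __name__ == "__main__":
    # Usage sketch (illustrative, assumes flax is installed): a tiny randomly
    # initialised Flax DistilBERT mirroring the tester's dimensions above, run
    # without downloading any pretrained weights.
    tiny_config = DistilBertConfig(vocab_size=99, dim=32, n_layers=2, n_heads=4, hidden_dim=37)
    tiny_model = FlaxDistilBertModel(tiny_config)
    tiny_output = tiny_model(np.ones((1, 7), dtype="i4"))
    assert tiny_output.last_hidden_state.shape == (1, 7, 32)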
| 73 | 1 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    def __init__(
        self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7,
        is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True,
        vocab_size=99, hidden_size=36, num_hidden_layers=2, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6,
        num_labels=3, num_choices=4, scope=None, range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32)

        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())

                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_for_sequence_classification(self):
        (
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)

    def test_for_token_classification(self):
        (
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels)

    def test_for_question_answering(self):
        (
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]])
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
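

# A vectorised alternative (an illustrative sketch, not used by the tests above) to
# the per-element bbox-legalisation loop in prepare_config_and_inputs: sorting each
# (y0, y1) and (x0, x1) pair enforces x0 <= x1 and y0 <= y1 for every box at once.
def legalize_bboxes(bbox):
    bbox = np.asarray(bbox)
    bbox[..., [1, 3]] = np.sort(bbox[..., [1, 3]], axis=-1)  # y0 <= y1
    bbox[..., [0, 2]] = np.sort(bbox[..., [0, 2]], axis=-1)  # x0 <= x1
    return bbox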
| 361 |
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes <= num."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
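    # Worked example (illustrative): prime_sieve(50) marks composites only for
    # start in 2..int(sqrt(50)) == 7, then sweeps the untouched tail 8..50, giving
    # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]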
| 65 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, position_embedding_type="absolute", use_cache=True,
        classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
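

if __name__ == "__main__":
    # Usage sketch (illustrative): a config is a plain-data object, so overriding
    # one field leaves every other field at the defaults listed above.
    config = CamembertConfig(num_hidden_layers=6)
    assert config.num_hidden_layers == 6
    assert config.hidden_size == 768  # untouched default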
| 77 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 314 | 0 |
from __future__ import annotations

from random import choice


def random_pivot(lst: list[int]) -> int:
    """Choose a random pivot element from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element of lst (1-indexed) via quickselect."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
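
    # Illustrative checks (note: the partition drops values equal to the pivot,
    # so the algorithm assumes distinct elements):
    assert kth_number([3, 1, 2], 1) == 1
    assert kth_number([3, 1, 2], 2) == 2
    assert kth_number([7, 5, 3, 1], 4) == 7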
| 356 |
def euclidean_distance_sqr(point1, point2) -> float:
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute force over the first points_counts points: O(n^2)."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Closest pair inside the strip; each point is compared with at most 6 neighbours."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
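    # Worked example (illustrative): among the sample points, (2, 3) and (3, 4)
    # are the closest pair, so the printed distance is sqrt(2) ~= 1.41421356.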
| 179 | 0 |
"""simple docstring"""
def _snake_case ( snake_case__ : str ):
A = 0
for ch in input_str:
A = ord(snake_case__ )
A = pow(2 , snake_case__ )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
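
    # Illustrative checks of the bitmap trick: Python ints are arbitrary precision,
    # so a single integer holds one presence bit per Unicode code point.
    assert all_unique_chars("abcde") is True
    assert all_unique_chars("hello") is False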
    doctest.testmod()
| 74 |
from ..utils import DummyObject, requires_backends
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCAmelCase : int , **lowerCAmelCase : Optional[int]) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Tuple , **lowerCAmelCase : Dict) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Tuple , **lowerCAmelCase : List[str]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : Any , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Tuple) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : Dict , **lowerCAmelCase : Any) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : int , **lowerCAmelCase : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Union[str, Any]) -> Any:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : Any , **lowerCAmelCase : List[str]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : Any , **lowerCAmelCase : List[str]) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = ["""torch"""]
def __init__( self : Tuple , *lowerCAmelCase : str , **lowerCAmelCase : Optional[Any]) -> Any:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : int , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : List[Any]) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : List[str]) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : str = ["""torch"""]
def __init__( self : str , *lowerCAmelCase : List[str] , **lowerCAmelCase : Any) -> int:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : str , **lowerCAmelCase : Any) -> str:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : List[Any] , **lowerCAmelCase : str) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[Any] = ["""torch"""]
def __init__( self : Any , *lowerCAmelCase : Dict , **lowerCAmelCase : int) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Any , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Optional[int]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : Dict , **lowerCAmelCase : List[Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : str , *lowerCAmelCase : int , **lowerCAmelCase : Any) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : int , **lowerCAmelCase : Union[str, Any]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : List[Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[int] = ["""torch"""]
def __init__( self : Optional[Any] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Tuple) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : Dict , **lowerCAmelCase : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : Dict , **lowerCAmelCase : List[str]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Union[str, Any] = ["""torch"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


# Placeholder stubs: each class imports fine but raises a clear error on use
# when the `torch` backend is unavailable.
class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


def lowercase(*args, **kwargs):
    requires_backends(lowercase, ["torch"])


def lowercase(*args, **kwargs):
    requires_backends(lowercase, ["torch"])


def lowercase(*args, **kwargs):
    requires_backends(lowercase, ["torch"])


def lowercase(*args, **kwargs):
    requires_backends(lowercase, ["torch"])


def lowercase(*args, **kwargs):
    requires_backends(lowercase, ["torch"])


def lowercase(*args, **kwargs):
    requires_backends(lowercase, ["torch"])


def lowercase(*args, **kwargs):
    requires_backends(lowercase, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
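# A minimal sketch of how these stubs behave at runtime (using the placeholder
# class name kept from this dump): importing always succeeds, but any use
# raises a helpful error when the `torch` backend is missing.
try:
    snake_case()
except ImportError as err:
    print(err)  # message explains that `torch` must be installed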
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = '''    \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, 'schedulers/'))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, 'src/diffusers/schedulers/scheduling_ddpm.py'),
            os.path.join(self.diffusers_dir, 'schedulers/scheduling_ddpm.py'),
        )
    def tearDown(self):
        check_copies.DIFFUSERS_PATH = 'src/diffusers'
        shutil.rmtree(self.diffusers_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
        if overwrite_result is not None:
            expected = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, 'new_code.py')
        with open(fname, 'w', newline='\n') as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(fname, overwrite=True)
            with open(fname, 'r') as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput')
        self.assertEqual(code, REFERENCE_CODE)
    def test_copy_consistency(self):
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput',
            'DDPMSchedulerOutput',
            REFERENCE_CODE + '\n',
        )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput',
            'DDPMSchedulerOutput',
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test',
            'TestSchedulerOutput',
            re.sub('DDPM', 'Test', REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            F'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}',
            F'{long_class_name}SchedulerOutput',
            re.sub('DDPM', long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test',
            'TestSchedulerOutput',
            REFERENCE_CODE,
            overwrite_result=re.sub('DDPM', 'Test', REFERENCE_CODE),
        )
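# A minimal usage sketch (not part of the test file) of the helper exercised
# above; the path is illustrative and assumes `check_copies` is importable as
# in the test setup.
diffs = check_copies.is_copy_consistent("schedulers/scheduling_ddpm.py")
if diffs:
    # Rewrite drifted `# Copied from` blocks so they match their source again.
    check_copies.is_copy_consistent("schedulers/scheduling_ddpm.py", overwrite=True)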
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius: float) -> float:
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def surface_area_hemisphere(radius: float) -> float:
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def surface_area_cone(radius: float, height: float) -> float:
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            'surface_area_conical_frustum() only accepts non-negative values' )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius: float, height: float) -> float:
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('surface_area_torus() only accepts non-negative values' )
    if torus_radius < tube_radius:
        raise ValueError(
            'surface_area_torus() does not support spindle or self intersecting tori' )
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def area_square(side_length: float) -> float:
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def area_triangle(base: float, height: float) -> float:
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError('Given three sides do not form a triangle' )
    # Heron's formula: area = sqrt(s * (s - a) * (s - b) * (s - c)), s the semi-perimeter.
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3) )
    return area
def area_parallelogram(base: float, height: float) -> float:
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values' )
    return 1 / 2 * (base_1 + base_2) * height
def area_circle(radius: float) -> float:
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def area_ellipse(radius_x: float, radius_y: float) -> float:
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values' )
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(f"""Rectangle: {area_rectangle(10, 20) = }""")
print(f"""Square: {area_square(10) = }""")
print(f"""Triangle: {area_triangle(10, 10) = }""")
print(f"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(f"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(f"""Rhombus: {area_rhombus(10, 20) = }""")
print(f"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(f"""Circle: {area_circle(20) = }""")
print(f"""Ellipse: {area_ellipse(10, 20) = }""")
print('''\nSurface Areas of various geometric shapes: \n''')
print(f"""Cube: {surface_area_cube(20) = }""")
print(f"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(f"""Sphere: {surface_area_sphere(20) = }""")
print(f"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(f"""Cone: {surface_area_cone(10, 20) = }""")
print(f"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(f"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(f"""Torus: {surface_area_torus(20, 10) = }""")
print(f"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(f"""Square: {area_reg_polygon(4, 10) = }""")
print(f"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""") | 190 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)
BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=25_08_80,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1E-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
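# Illustration of the dummy past_key_values layout built above, with
# hypothetical sizes (not from the original): BLOOM fuses batch and heads into
# one leading dimension, and keys are stored transposed relative to values
# (the "dynamic axis 2" noted in `inputs`).
batch, n_head, head_dim, seqlen = 2, 8, 8, 5
past_key_values_length = seqlen + 2
past_key_shape = (batch * n_head, head_dim, past_key_values_length)    # (16, 8, 7)
past_value_shape = (batch * n_head, past_key_values_length, head_dim)  # (16, 7, 8)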
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
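# A concrete test is expected to wire up the two attributes the mixin reads;
# a hypothetical sketch (names illustrative, not from the original):
#
#     class MyFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#         feature_extraction_class = MyFeatureExtractor
#         feat_extract_dict = {"feature_size": 1, "sampling_rate": 16000}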
"""simple docstring"""
from math import sqrt
def __SCREAMING_SNAKE_CASE ( lowercase__ = 1_000_000 ):
"""simple docstring"""
A = 0
A = 0
A = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(lowercase__ , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
| 57 |
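# The integer test in `solution` comes from unfolding the cuboid: for sides
# a <= b <= c, the shortest corner-to-corner surface path has length
# sqrt((a + b)**2 + c**2). A minimal check with a hypothetical helper (not
# part of the original):
def shortest_path_is_integer(a: int, b: int, c: int) -> bool:
    return sqrt((a + b) ** 2 + c ** 2).is_integer()


assert shortest_path_is_integer(3, 5, 6)  # the classic 3 x 5 x 6 cuboid: path length 10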
"""simple docstring"""
import numpy as np
import datasets
__A : Optional[int] = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
__A : Any = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
__A : List[str] = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def SCREAMING_SNAKE_CASE__ (self : Dict):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence") , id="X"),
}) , )
def SCREAMING_SNAKE_CASE__ (self : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]):
# convert to numpy arrays
A = np.array(__SCREAMING_SNAKE_CASE)
A = np.array(__SCREAMING_SNAKE_CASE)
# Assert that arrays are 2D
if len(X.shape) != 2:
raise ValueError("Expected `X` to be a 2D vector")
if len(reference_distribution.shape) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector")
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension")
# Get mahalanobis distance for each prediction
A = X - np.mean(__SCREAMING_SNAKE_CASE)
A = np.cov(reference_distribution.T)
try:
A = np.linalg.inv(__SCREAMING_SNAKE_CASE)
except np.linalg.LinAlgError:
A = np.linalg.pinv(__SCREAMING_SNAKE_CASE)
A = np.dot(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
A = np.dot(__SCREAMING_SNAKE_CASE , X_minus_mu.T).diagonal()
return {"mahalanobis": mahal_dist}
| 57 | 1 |
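# A minimal NumPy sketch (not part of the metric) reproducing the docstring
# example; note the value computed above is the *squared* Mahalanobis distance
# (no square root is taken), and a singular covariance falls back to pinv.
X = np.array([[0, 1]])
ref = np.array([[0, 1], [1, 0]])
delta = X - np.mean(ref)
inv_cov = np.linalg.pinv(np.cov(ref.T))
print(np.dot(np.dot(delta, inv_cov), delta.T).diagonal())  # [0.5]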
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph: list, s: int, t: int, parent: list) -> bool:
    # Return True if the sink `t` is reachable from `s` in the residual graph,
    # recording the traversal tree in `parent`.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph: list, source: int, sink: int) -> list:
    # Ford-Fulkerson with BFS (Edmonds-Karp): repeatedly augment along
    # shortest paths, then report the edges that ended up saturated.
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record the original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the bottleneck capacity on the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
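# A quick check sketch (not part of the original): the capacities of the
# returned saturated edges sum to the max-flow value, 23 for this classic
# example graph. `mincut` mutates the graph it is given, so run it on a copy.
import copy

cut = mincut(copy.deepcopy(test_graph), source=0, sink=5)
print(sum(test_graph[u][v] for u, v in cut))  # 23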
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 1_0

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_1_0_0,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            residual = model(sample, t)
            output = scheduler.step(residual, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47_8210_4492_1875) < 1E-2
            assert abs(result_mean.item() - 0.2178_7059_6456_5277) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_3521_1181_6406) < 1E-2
            assert abs(result_mean.item() - 0.2_2342_9068_9229_9652) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1E-2
            assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1E-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            residual = model(sample, t)
            output = scheduler.step(residual, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77_1492_0043_9453) < 1E-2
            assert abs(result_mean.item() - 0.1_6226_2890_1481_6284) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1_6633_6059_5703) < 1E-2
            assert abs(result_mean.item() - 0.1_6688_3260_0116_7297) < 1E-3
        else:
            assert abs(result_sum.item() - 119.8_4875_4882_8125) < 1E-2
            assert abs(result_mean.item() - 0.1560_5306_6253_6621) < 1E-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            residual = model(sample, t)
            output = scheduler.step(residual, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46_9573_9746_0938) < 1E-2
            assert abs(result_mean.item() - 0.2_1805_9346_0798_2635) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_3536_3769_5312) < 1E-2
            assert abs(result_mean.item() - 0.2_2342_9083_8241_5771) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1E-2
            assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1E-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            residual = model(sample, t)
            output = scheduler.step(residual, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66_9741_3574_2188) < 1E-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1E-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63_6535_6445_3125) < 1E-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1E-2
        else:
            assert abs(result_sum.item() - 170.3_1352_2338_8672) < 1E-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1E-2
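# The four loop tests above all drive the same canonical sampling pattern; a
# minimal standalone sketch of it (illustrative only, with a zero "model"
# standing in for a real denoiser):
scheduler = DPMSolverSDEScheduler(num_train_timesteps=1_1_0_0, noise_sampler_seed=0)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    residual = torch.zeros_like(model_input)  # placeholder for a real model
    sample = scheduler.step(residual, t, sample).prev_sample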
def solution(n: int = 4000000) -> int:
    # Sum the even-valued Fibonacci terms that do not exceed `n`.
    fib = [0, 1]
    i = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
    total = 0
    for j in range(len(fib) - 1):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(F'''{solution() = }''')
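# Equivalent approach (a sketch, not the original solution): every third
# Fibonacci number is even, and the even terms satisfy E(k) = 4*E(k-1) + E(k-2),
# so the even-valued terms can be generated directly.
def solution_even_recurrence(n: int = 4000000) -> int:
    a, b = 2, 8  # first two even Fibonacci numbers
    total = 0
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total


assert solution_even_recurrence() == 4613732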
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    if not is_tqdm_available():
        raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""")
    disable = False
    if main_process_only:
        # Show the bar only on the local main process; disable it elsewhere.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
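# A minimal usage sketch (hypothetical, assuming a script launched with
# `accelerate launch`). Note the first positional argument is
# `main_process_only`, so the iterable comes second, or pass it by keyword.
for _ in tqdm(True, range(100)):
    pass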