| code (stringlengths 82 to 53.2k) | code_codestyle (int64 0 to 721) | style_context (stringlengths 91 to 41.9k) | style_context_codestyle (int64 0 to 699) | label (int64 0 to 1) |
| --- | --- | --- | --- | --- |
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    """Word error rate metric, backed by jiwer."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 625 |
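The description above defines WER = (S + D + I) / N. As a quick, self-contained check, here is a minimal sketch that reproduces the docstring's 0.5 result by calling jiwer directly (assuming a jiwer version that still exposes `compute_measures`):

```python
from jiwer import compute_measures

predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]

incorrect, total = 0, 0
for prediction, reference in zip(predictions, references):
    measures = compute_measures(reference, prediction)
    # errors = S + D + I; reference length N = S + D + C (jiwer calls C "hits")
    incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
    total += measures["substitutions"] + measures["deletions"] + measures["hits"]

print(incorrect / total)  # 0.5: 1 substitution in the first pair, 3 errors in the second, 8 reference words
```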
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    '''Transcribes speech with Whisper and feeds the transcription to Stable Diffusion as the prompt.'''
    def __init__(self, speech_model: WhisperForConditionalGeneration, speech_processor: WhisperProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor) -> None:
"""simple docstring"""
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
        self.register_modules(
            speech_model=speech_model, speech_processor=speech_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Enable sliced attention computation (saves memory at a small speed cost)."""
        if slice_size == "auto":
            # half the attention head count is a reasonable default slice size
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)
    def disable_attention_slicing(self):
        """Disable attention slicing and compute attention in one step."""
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(self, audio, sampling_rate: int = 16_000, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        """simple docstring"""
        # transcribe the audio with Whisper and use the transcription as the text prompt
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(prompt)}""")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(UpperCAmelCase_ )}.""" )
# get prompt text embeddings
_lowerCAmelCase = self.tokenizer(
UpperCAmelCase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
_lowerCAmelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_lowerCAmelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
_lowerCAmelCase = text_input_ids[:, : self.tokenizer.model_max_length]
_lowerCAmelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = text_embeddings.shape
_lowerCAmelCase = text_embeddings.repeat(1 , UpperCAmelCase_ , 1 )
_lowerCAmelCase = text_embeddings.view(bs_embed * num_images_per_prompt , UpperCAmelCase_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCAmelCase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase = 42
if negative_prompt is None:
_lowerCAmelCase = [''] * batch_size
elif type(UpperCAmelCase_ ) is not type(UpperCAmelCase_ ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(UpperCAmelCase_ )} !="""
F""" {type(UpperCAmelCase_ )}.""" )
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_lowerCAmelCase = [negative_prompt]
elif batch_size != len(UpperCAmelCase_ ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(UpperCAmelCase_ )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
' the batch size of `prompt`.' )
else:
_lowerCAmelCase = negative_prompt
_lowerCAmelCase = text_input_ids.shape[-1]
_lowerCAmelCase = self.tokenizer(
UpperCAmelCase_ , padding='max_length' , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors='pt' , )
_lowerCAmelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_lowerCAmelCase = uncond_embeddings.shape[1]
_lowerCAmelCase = uncond_embeddings.repeat(1 , UpperCAmelCase_ , 1 )
_lowerCAmelCase = uncond_embeddings.view(batch_size * num_images_per_prompt , UpperCAmelCase_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCAmelCase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
_lowerCAmelCase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
_lowerCAmelCase = torch.randn(UpperCAmelCase_ , generator=UpperCAmelCase_ , device='cpu' , dtype=UpperCAmelCase_ ).to(
self.device )
else:
_lowerCAmelCase = torch.randn(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=self.device , dtype=UpperCAmelCase_ )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
_lowerCAmelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(UpperCAmelCase_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
_lowerCAmelCase = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCAmelCase = {}
if accepts_eta:
_lowerCAmelCase = eta
for i, t in enumerate(self.progress_bar(UpperCAmelCase_ ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase = self.scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ )
# predict the noise residual
_lowerCAmelCase = self.unet(UpperCAmelCase_ , UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ ).sample
# perform guidance
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase = noise_pred.chunk(2 )
_lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase = self.scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_lowerCAmelCase = 1 / 0.18215 * latents
_lowerCAmelCase = self.vae.decode(UpperCAmelCase_ ).sample
_lowerCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase = self.numpy_to_pil(UpperCAmelCase_ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=UpperCAmelCase_ , nsfw_content_detected=UpperCAmelCase_ )
| 580 | 0 |
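For orientation, here is a hedged sketch of how such a pipeline could be assembled from pretrained checkpoints. The checkpoint names are illustrative assumptions, not taken from the snippet; the component wiring follows the `__init__` signature above.

```python
import torch
from transformers import WhisperForConditionalGeneration, WhisperProcessor
from diffusers import StableDiffusionPipeline

# Illustrative checkpoints (assumptions): any Whisper ASR model plus a Stable Diffusion checkpoint.
speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
sd = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

pipe = SpeechToImagePipeline(  # the class defined above
    speech_model=speech_model,
    speech_processor=speech_processor,
    vae=sd.vae,
    text_encoder=sd.text_encoder,
    tokenizer=sd.tokenizer,
    unet=sd.unet,
    scheduler=sd.scheduler,
    safety_checker=sd.safety_checker,
    feature_extractor=sd.feature_extractor,
).to("cuda" if torch.cuda.is_available() else "cpu")

# `audio` is a 1-D waveform (e.g. loaded with librosa or datasets) sampled at 16 kHz:
# image = pipe(audio, sampling_rate=16_000).images[0]
```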
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase : List[Any] = logging.get_logger(__name__)
class _UpperCamelCase (a_ ):
snake_case_ = ["""pixel_values"""]
def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PIL.Image.BICUBIC , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = 1 / 2_5_5 , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> None:
super().__init__(**__UpperCamelCase )
__lowerCAmelCase = size if size is not None else {"height": 2_5_6, "width": 2_5_6}
__lowerCAmelCase = get_size_dict(__UpperCamelCase )
__lowerCAmelCase = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
__lowerCAmelCase = get_size_dict(__UpperCamelCase , param_name="crop_size" )
__lowerCAmelCase = do_resize
__lowerCAmelCase = size
__lowerCAmelCase = resample
__lowerCAmelCase = do_center_crop
__lowerCAmelCase = crop_size
__lowerCAmelCase = do_rescale
__lowerCAmelCase = rescale_factor
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PIL.Image.BICUBIC , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
__lowerCAmelCase = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
__UpperCamelCase , size=(size["height"], size["width"]) , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
__lowerCAmelCase = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(__UpperCamelCase , size=(size["height"], size["width"]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> Optional[int]:
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase=None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , )-> PIL.Image.Image:
__lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase = resample if resample is not None else self.resample
__lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
__lowerCAmelCase = image_std if image_std is not None else self.image_std
__lowerCAmelCase = size if size is not None else self.size
__lowerCAmelCase = get_size_dict(__UpperCamelCase )
__lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
__lowerCAmelCase = get_size_dict(__UpperCamelCase , param_name="crop_size" )
__lowerCAmelCase = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__lowerCAmelCase = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
__lowerCAmelCase = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images]
if do_center_crop:
__lowerCAmelCase = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images]
if do_rescale:
__lowerCAmelCase = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images]
if do_normalize:
__lowerCAmelCase = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images]
__lowerCAmelCase = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
__lowerCAmelCase = {"pixel_values": images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
| 290 |
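The `preprocess` method above reduces to a fixed chain of array transforms: resize to 256x256, center-crop to 224x224, rescale by 1/255, normalize with the `IMAGENET_STANDARD` mean/std (all 0.5), and move channels first. A stand-alone NumPy/PIL sketch of that chain, using plain stand-ins rather than the transformers helpers:

```python
import numpy as np
from PIL import Image

def preprocess_one(img: Image.Image) -> np.ndarray:
    img = img.convert("RGB").resize((256, 256), Image.BICUBIC)   # do_resize, size=256
    arr = np.asarray(img, dtype=np.float32)
    off = (256 - 224) // 2
    arr = arr[off : off + 224, off : off + 224, :]               # do_center_crop, crop_size=224
    arr = arr * (1 / 255)                                        # do_rescale, rescale_factor=1/255
    arr = (arr - 0.5) / 0.5                                      # do_normalize, mean=std=0.5
    return arr.transpose(2, 0, 1)                                # ChannelDimension.FIRST

# pixel_values = np.stack([preprocess_one(im) for im in images])
```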
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase : Any = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class _UpperCamelCase (a_ ):
snake_case_ = """instructblip_vision_model"""
def __init__( self , __UpperCamelCase=1_4_0_8 , __UpperCamelCase=6_1_4_4 , __UpperCamelCase=3_9 , __UpperCamelCase=1_6 , __UpperCamelCase=2_2_4 , __UpperCamelCase=1_4 , __UpperCamelCase="gelu" , __UpperCamelCase=1e-6 , __UpperCamelCase=0.0 , __UpperCamelCase=1e-10 , __UpperCamelCase=True , **__UpperCamelCase , )-> Union[str, Any]:
super().__init__(**__UpperCamelCase )
__lowerCAmelCase = hidden_size
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = patch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = hidden_act
__lowerCAmelCase = qkv_bias
@classmethod
def __UpperCAmelCase ( cls , __UpperCamelCase , **__UpperCamelCase )-> "PretrainedConfig":
cls._set_token_in_kwargs(__UpperCamelCase )
__lowerCAmelCase , __lowerCAmelCase = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
__lowerCAmelCase = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class _UpperCamelCase (a_ ):
snake_case_ = """instructblip_qformer"""
def __init__( self , __UpperCamelCase=3_0_5_2_2 , __UpperCamelCase=7_6_8 , __UpperCamelCase=1_2 , __UpperCamelCase=1_2 , __UpperCamelCase=3_0_7_2 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=5_1_2 , __UpperCamelCase=0.0_2 , __UpperCamelCase=1e-12 , __UpperCamelCase=0 , __UpperCamelCase="absolute" , __UpperCamelCase=2 , __UpperCamelCase=1_4_0_8 , **__UpperCamelCase , )-> Any:
super().__init__(pad_token_id=__UpperCamelCase , **__UpperCamelCase )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = position_embedding_type
__lowerCAmelCase = cross_attention_frequency
__lowerCAmelCase = encoder_hidden_size
@classmethod
def __UpperCAmelCase ( cls , __UpperCamelCase , **__UpperCamelCase )-> "PretrainedConfig":
cls._set_token_in_kwargs(__UpperCamelCase )
__lowerCAmelCase , __lowerCAmelCase = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
__lowerCAmelCase = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class _UpperCamelCase (a_ ):
snake_case_ = """instructblip"""
snake_case_ = True
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=3_2 , **__UpperCamelCase )-> Tuple:
super().__init__(**__UpperCamelCase )
if vision_config is None:
__lowerCAmelCase = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
__lowerCAmelCase = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
__lowerCAmelCase = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
__lowerCAmelCase = InstructBlipVisionConfig(**__UpperCamelCase )
__lowerCAmelCase = InstructBlipQFormerConfig(**__UpperCamelCase )
__lowerCAmelCase = text_config["model_type"] if "model_type" in text_config else "opt"
__lowerCAmelCase = CONFIG_MAPPING[text_model_type](**__UpperCamelCase )
__lowerCAmelCase = self.text_config.tie_word_embeddings
__lowerCAmelCase = self.text_config.is_encoder_decoder
__lowerCAmelCase = num_query_tokens
__lowerCAmelCase = self.vision_config.hidden_size
__lowerCAmelCase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__lowerCAmelCase = 1.0
__lowerCAmelCase = 0.0_2
@classmethod
def __UpperCAmelCase ( cls , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase , )-> int:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__UpperCamelCase , )
def __UpperCAmelCase ( self )-> Dict:
__lowerCAmelCase = copy.deepcopy(self.__dict__ )
__lowerCAmelCase = self.vision_config.to_dict()
__lowerCAmelCase = self.qformer_config.to_dict()
__lowerCAmelCase = self.text_config.to_dict()
__lowerCAmelCase = self.__class__.model_type
return output
| 290 | 1 |
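The three config classes mirror transformers' `InstructBlipVisionConfig`, `InstructBlipQFormerConfig`, and `InstructBlipConfig` (the snippet itself references the first two). A hedged usage sketch; the checkpoint name is illustrative:

```python
from transformers import InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig

# Compose a top-level config from default sub-configs (the text model falls back to OPT when unspecified).
config = InstructBlipConfig(
    vision_config=InstructBlipVisionConfig().to_dict(),
    qformer_config=InstructBlipQFormerConfig().to_dict(),
    num_query_tokens=32,
)

# Or pull just the vision tower's config out of a full checkpoint via the classmethod above.
vision_config = InstructBlipVisionConfig.from_pretrained("Salesforce/instructblip-flan-t5-xl")
```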
'''simple docstring'''
def equation(x: float) -> float:
    """simple docstring"""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """simple docstring"""
    # Bolzano theorem: check that a root is bracketed between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError('''Wrong space!''')
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 676 |
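As a quick check on the example calls: the positive root of 10 - x^2 is sqrt(10), roughly 3.1623, and the loop stops once the bracketing interval is narrower than 0.01, so both `bisection(-2, 5)` and `bisection(0, 6)` should print a value within about 0.01 of 3.1623.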
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
a_ : List[Any] = logging.get_logger(__name__)
def a_ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : List[Any] , __snake_case : int=False ) -> List[str]:
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
lowerCamelCase_ =os.path.abspath(__snake_case )
logger.info(F'''Loading PyTorch weights from {pt_path}''' )
lowerCamelCase_ =torch.load(__snake_case , map_location='''cpu''' )
logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' )
lowerCamelCase_ =convert_pytorch_state_dict_to_flax(__snake_case , __snake_case )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
lowerCamelCase_ =convert_pytorch_sharded_state_dict_to_flax(__snake_case , __snake_case )
return flax_state_dict
def a_ ( __snake_case : Tuple[str] , __snake_case : np.ndarray , __snake_case : Dict[str, jnp.ndarray] , __snake_case : str , ) -> (Tuple[str], np.ndarray):
"""simple docstring"""
def is_key_or_prefix_key_in_dict(__snake_case : Tuple[str] ) -> bool:
return len(set(__snake_case ) & {key, (model_prefix,) + key} ) > 0
# layer norm
lowerCamelCase_ =pt_tuple_key[:-1] + ('''scale''',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__snake_case ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
lowerCamelCase_ =pt_tuple_key[:-1] + ('''mean''',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__snake_case ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
lowerCamelCase_ =pt_tuple_key[:-1] + ('''var''',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__snake_case ):
return renamed_pt_tuple_key, pt_tensor
# embedding
lowerCamelCase_ =pt_tuple_key[:-1] + ('''embedding''',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__snake_case ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__snake_case ):
lowerCamelCase_ =pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__snake_case ):
lowerCamelCase_ =pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCamelCase_ =pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCamelCase_ =pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
lowerCamelCase_ =None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
lowerCamelCase_ =pt_tuple_key[-2] + '''_g'''
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
lowerCamelCase_ =pt_tuple_key[-2] + '''_v'''
if name is not None:
lowerCamelCase_ =pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def a_ ( __snake_case : Union[str, Any] , __snake_case : str ) -> str:
"""simple docstring"""
# convert pytorch tensor to numpy
lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()}
lowerCamelCase_ =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
lowerCamelCase_ =flax_model.params['''params''']
else:
lowerCamelCase_ =flax_model.params
lowerCamelCase_ =flatten_dict(__snake_case )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCamelCase_ =flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(__snake_case )
lowerCamelCase_ ={}
lowerCamelCase_ =(model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
lowerCamelCase_ =(model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase_ =tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
lowerCamelCase_ =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase_ =pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor(
__snake_case , __snake_case , __snake_case , __snake_case )
# add model prefix if necessary
lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase_ =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
lowerCamelCase_ =jnp.asarray(__snake_case )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__snake_case , __snake_case )
continue
# also add unexpected weight so that warning is thrown
lowerCamelCase_ =jnp.asarray(__snake_case )
else:
# also add unexpected weight so that warning is thrown
lowerCamelCase_ =jnp.asarray(__snake_case )
return unflatten_dict(__snake_case )
def a_ ( __snake_case : Union[str, Any] , __snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
import torch
# Load the index
lowerCamelCase_ ={}
for shard_file in shard_filenames:
# load using msgpack utils
lowerCamelCase_ =torch.load(__snake_case )
lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()}
lowerCamelCase_ =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCamelCase_ =flax_model.params['''params''']
lowerCamelCase_ =flatten_dict(__snake_case )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
lowerCamelCase_ =flax_model.params
lowerCamelCase_ =flatten_dict(__snake_case )
lowerCamelCase_ =(model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
lowerCamelCase_ =(model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase_ =tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
lowerCamelCase_ =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase_ =pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor(
__snake_case , __snake_case , __snake_case , __snake_case )
# add model prefix if necessary
lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase_ =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
lowerCamelCase_ =jnp.asarray(__snake_case )
continue
if "var" in flax_key[-1]:
lowerCamelCase_ =jnp.asarray(__snake_case )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__snake_case , __snake_case )
continue
# also add unexpected weight so that warning is thrown
lowerCamelCase_ =jnp.asarray(__snake_case )
else:
# also add unexpected weight so that warning is thrown
lowerCamelCase_ =jnp.asarray(__snake_case )
return unflatten_dict(__snake_case )
def a_ ( __snake_case : List[str] , __snake_case : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_ =os.path.abspath(__snake_case )
logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' )
# import correct flax class
lowerCamelCase_ =getattr(__snake_case , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(__snake_case , '''rb''' ) as state_f:
try:
lowerCamelCase_ =from_bytes(__snake_case , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(__snake_case , __snake_case )
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
            '''before loading those in PyTorch model.''')
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state)
    flax_state_dict = flatten_dict(flax_state)
lowerCamelCase_ =pt_model.state_dict()
lowerCamelCase_ =(pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
lowerCamelCase_ =(pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
lowerCamelCase_ =[]
lowerCamelCase_ =set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowerCamelCase_ =flax_key_tuple[0] == pt_model.base_model_prefix
lowerCamelCase_ ='''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase_ =flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase_ =(pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__snake_case ) not in pt_model_dict:
# conv layer
lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',)
lowerCamelCase_ =jnp.transpose(__snake_case , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__snake_case ) not in pt_model_dict:
# linear layer
lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',)
lowerCamelCase_ =flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
lowerCamelCase_ ='''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
lowerCamelCase_ ='''.'''.join(__snake_case )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
lowerCamelCase_ ={}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
lowerCamelCase_ =key.split('''.''' )
lowerCamelCase_ =None
if key_components[-3::2] == ["parametrizations", "original0"]:
lowerCamelCase_ =key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
lowerCamelCase_ =key_components[-2] + '''_v'''
if name is not None:
lowerCamelCase_ =key_components[:-3] + [name]
lowerCamelCase_ ='''.'''.join(__snake_case )
lowerCamelCase_ =key
if flax_key in special_pt_names:
lowerCamelCase_ =special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
lowerCamelCase_ =np.asarray(__snake_case ) if not isinstance(__snake_case , np.ndarray ) else flax_tensor
lowerCamelCase_ =torch.from_numpy(__snake_case )
# remove from missing keys
missing_keys.remove(__snake_case )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__snake_case )
pt_model.load_state_dict(__snake_case )
# re-transform missing_keys to list
lowerCamelCase_ =list(__snake_case )
if len(__snake_case ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
if len(__snake_case ) > 0:
logger.warning(
F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
''' use it for predictions and inference.''' )
else:
logger.warning(
F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
'''If your task is similar to the task the model of the checkpoint was trained on, '''
F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
return pt_model
| 676 | 1 |
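These converters are usually exercised indirectly through the `from_pt` / `from_flax` flags of the model classes rather than called by hand. A hedged sketch (the checkpoint name is illustrative, and both frameworks must be installed):

```python
from transformers import BertModel, FlaxBertModel

# PyTorch checkpoint -> Flax parameters (goes through convert_pytorch_state_dict_to_flax).
flax_model = FlaxBertModel.from_pretrained("bert-base-uncased", from_pt=True)

# Flax checkpoint -> PyTorch weights (goes through load_flax_weights_in_pytorch_model).
pt_model = BertModel.from_pretrained("bert-base-uncased", from_flax=True)
```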
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case_ : Dict = logging.get_logger(__name__)
snake_case_ : str = {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
"google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
"google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class snake_case_ ( __A ):
'''simple docstring'''
lowerCamelCase = "big_bird"
def __init__( self : List[Any] , __magic_name__ : Optional[Any]=5_0358 , __magic_name__ : Union[str, Any]=768 , __magic_name__ : Tuple=12 , __magic_name__ : Optional[int]=12 , __magic_name__ : List[str]=3072 , __magic_name__ : Dict="gelu_new" , __magic_name__ : Any=0.1 , __magic_name__ : Dict=0.1 , __magic_name__ : Union[str, Any]=4096 , __magic_name__ : List[str]=2 , __magic_name__ : str=0.02 , __magic_name__ : Dict=1e-12 , __magic_name__ : int=True , __magic_name__ : Optional[int]=0 , __magic_name__ : Optional[Any]=1 , __magic_name__ : str=2 , __magic_name__ : Union[str, Any]=66 , __magic_name__ : Any="block_sparse" , __magic_name__ : Union[str, Any]=True , __magic_name__ : Optional[Any]=False , __magic_name__ : Any=64 , __magic_name__ : str=3 , __magic_name__ : Optional[Any]=None , **__magic_name__ : Tuple , ) -> Optional[Any]:
super().__init__(
pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , sep_token_id=__magic_name__ , **__magic_name__ , )
lowerCamelCase_ : List[Any] = vocab_size
lowerCamelCase_ : List[Any] = max_position_embeddings
lowerCamelCase_ : Optional[int] = hidden_size
lowerCamelCase_ : Optional[Any] = num_hidden_layers
lowerCamelCase_ : int = num_attention_heads
lowerCamelCase_ : Dict = intermediate_size
lowerCamelCase_ : int = hidden_act
lowerCamelCase_ : Optional[Any] = hidden_dropout_prob
lowerCamelCase_ : Tuple = attention_probs_dropout_prob
lowerCamelCase_ : Dict = initializer_range
lowerCamelCase_ : Any = type_vocab_size
lowerCamelCase_ : Optional[int] = layer_norm_eps
lowerCamelCase_ : List[str] = use_cache
lowerCamelCase_ : Optional[Any] = rescale_embeddings
lowerCamelCase_ : Union[str, Any] = attention_type
lowerCamelCase_ : Optional[int] = use_bias
lowerCamelCase_ : List[str] = block_size
lowerCamelCase_ : int = num_random_blocks
lowerCamelCase_ : int = classifier_dropout
class snake_case_ ( __A ):
'''simple docstring'''
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCamelCase_ : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase_ : Dict = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 253 |
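A brief hedged sketch of instantiating the config; the defaults above correspond to the google/bigbird-roberta-base checkpoint listed in the archive map:

```python
from transformers import BigBirdConfig, BigBirdModel

# Block-sparse attention is the default; "original_full" gives dense attention for short inputs.
config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
model = BigBirdModel(config)
print(config.max_position_embeddings)  # 4096
```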
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """simple docstring"""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    # set the Decimal precision and the number of Chudnovsky iterations (~14 digits per term)
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
| 253 | 1 |
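A quick hedged sanity check: the leading digits returned by `pi()` should agree with the float value of pi, which is only good to about 15-16 significant digits:

```python
from math import pi as float_pi

print(pi(30))    # 30-digit Chudnovsky approximation (the function truncates the last digit)
print(float_pi)  # 3.141592653589793 -- the leading digits should match
```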
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
lowercase : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
lowercase : Union[str, Any] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")\n >>> pipe.to(\"cuda\")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save(\"cat.png\")\n ```\n"
def lowerCAmelCase__ ( _a : Union[str, Any] , _a : Optional[int] , _a : Union[str, Any]=8 ):
snake_case_ : Union[str, Any] = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
snake_case_ : List[Any] = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
class UpperCAmelCase_ ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> int:
super().__init__()
self.register_modules(
text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , movq=__a , )
snake_case_ : Optional[int] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
if latents is None:
snake_case_ : str = randn_tensor(__a , generator=__a , device=__a , dtype=__a )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
snake_case_ : Any = latents.to(__a )
snake_case_ : Union[str, Any] = latents * scheduler.init_noise_sigma
return latents
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , ) -> Optional[int]:
snake_case_ : Optional[int] = len(__a ) if isinstance(__a , __a ) else 1
# get prompt text embeddings
snake_case_ : Optional[int] = self.tokenizer(
__a , padding="max_length" , truncation=__a , max_length=77 , return_attention_mask=__a , add_special_tokens=__a , return_tensors="pt" , )
snake_case_ : Optional[int] = text_inputs.input_ids
snake_case_ : Optional[int] = self.tokenizer(__a , padding="longest" , return_tensors="pt" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(__a , __a ):
snake_case_ : Optional[int] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
snake_case_ : List[str] = text_input_ids.to(__a )
snake_case_ : str = text_inputs.attention_mask.to(__a )
snake_case_ , snake_case_ : Optional[int] = self.text_encoder(
input_ids=__a , attention_mask=__a )
snake_case_ : List[Any] = prompt_embeds.repeat_interleave(__a , dim=0 )
snake_case_ : int = text_encoder_hidden_states.repeat_interleave(__a , dim=0 )
snake_case_ : List[str] = text_mask.repeat_interleave(__a , dim=0 )
if do_classifier_free_guidance:
snake_case_ : Optional[int] = 42
if negative_prompt is None:
snake_case_ : Optional[int] = [""] * batch_size
elif type(__a ) is not type(__a ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(__a )} !='''
f''' {type(__a )}.''' )
elif isinstance(__a , __a ):
snake_case_ : List[str] = [negative_prompt]
elif batch_size != len(__a ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(__a )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
" the batch size of `prompt`." )
else:
snake_case_ : Optional[Any] = negative_prompt
snake_case_ : Tuple = self.tokenizer(
__a , padding="max_length" , max_length=77 , truncation=__a , return_attention_mask=__a , add_special_tokens=__a , return_tensors="pt" , )
snake_case_ : List[str] = uncond_input.input_ids.to(__a )
snake_case_ : Any = uncond_input.attention_mask.to(__a )
snake_case_ , snake_case_ : Any = self.text_encoder(
input_ids=__a , attention_mask=__a )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case_ : Dict = negative_prompt_embeds.shape[1]
snake_case_ : str = negative_prompt_embeds.repeat(1 , __a )
snake_case_ : List[str] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __a )
snake_case_ : Union[str, Any] = uncond_text_encoder_hidden_states.shape[1]
snake_case_ : Tuple = uncond_text_encoder_hidden_states.repeat(1 , __a , 1 )
snake_case_ : Any = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , __a , -1 )
snake_case_ : Dict = uncond_text_mask.repeat_interleave(__a , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case_ : Union[str, Any] = torch.cat([negative_prompt_embeds, prompt_embeds] )
snake_case_ : Dict = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
snake_case_ : int = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE=0 ) -> Any:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
snake_case_ : Any = torch.device(f'''cuda:{gpu_id}''' )
snake_case_ : Optional[Any] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__a , __a )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE=0 ) -> int:
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
snake_case_ : Optional[Any] = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=__a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
snake_case_ : str = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
snake_case_ , snake_case_ : Dict = cpu_offload_with_hook(__a , __a , prev_module_hook=__a )
if self.safety_checker is not None:
snake_case_ , snake_case_ : List[str] = cpu_offload_with_hook(self.safety_checker , __a , prev_module_hook=__a )
# We'll offload the last model manually.
snake_case_ : Optional[int] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowerCAmelCase ( self ) -> Dict:
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__a )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 100 , _SCREAMING_SNAKE_CASE = 4.0 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , ) -> Optional[int]:
if isinstance(__a , __a ):
snake_case_ : int = 1
elif isinstance(__a , __a ):
snake_case_ : Optional[int] = len(__a )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__a )}''' )
snake_case_ : Tuple = self._execution_device
snake_case_ : str = batch_size * num_images_per_prompt
snake_case_ : int = guidance_scale > 1.0
snake_case_ , snake_case_ , snake_case_ : Optional[Any] = self._encode_prompt(
__a , __a , __a , __a , __a )
if isinstance(__a , __a ):
snake_case_ : Optional[Any] = torch.cat(__a , dim=0 )
if isinstance(__a , __a ):
snake_case_ : List[Any] = torch.cat(__a , dim=0 )
if do_classifier_free_guidance:
snake_case_ : List[str] = image_embeds.repeat_interleave(__a , dim=0 )
snake_case_ : Union[str, Any] = negative_image_embeds.repeat_interleave(__a , dim=0 )
snake_case_ : List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=__a )
self.scheduler.set_timesteps(__a , device=__a )
snake_case_ : Dict = self.scheduler.timesteps
snake_case_ : Tuple = self.unet.config.in_channels
snake_case_ , snake_case_ : Tuple = get_new_h_w(__a , __a , self.movq_scale_factor )
# create initial latent
snake_case_ : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , __a , __a , __a , self.scheduler , )
for i, t in enumerate(self.progress_bar(__a ) ):
# expand the latents if we are doing classifier free guidance
snake_case_ : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case_ : Tuple = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
snake_case_ : List[str] = self.unet(
sample=__a , timestep=__a , encoder_hidden_states=__a , added_cond_kwargs=__a , return_dict=__a , )[0]
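            # Classifier-free guidance: the batch holds an unconditional and a text-conditioned copy, so split the
            # prediction and push the unconditional estimate toward the text-conditioned one by guidance_scale.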
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, ).prev_sample
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 568 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=lowerCAmelCase__ )
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : str =field(default="""image-classification""" ,metadata={"""include_in_asdict_even_if_is_default""": True} )
__UpperCAmelCase : ClassVar[Features] =Features({"""image""": Image()} )
__UpperCAmelCase : ClassVar[Features] =Features({"""labels""": ClassLabel} )
__UpperCAmelCase : str ="image"
__UpperCAmelCase : str ="labels"
def snake_case ( self , __a ):
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] , __a ):
raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
__lowerCAmelCase = copy.deepcopy(self )
__lowerCAmelCase = self.label_schema.copy()
__lowerCAmelCase = features[self.label_column]
__lowerCAmelCase = label_schema
return task_template
@property
def snake_case ( self ):
return {
self.image_column: "image",
self.label_column: "labels",
}
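# Hypothetical usage sketch (the upstream class/method are datasets' ImageClassification.align_with_features;
# those names are assumed here, since this file only shows renamed definitions):
# features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
# task = ImageClassification().align_with_features(features)  # copies the concrete ClassLabel into the label schema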
| 636 | 0 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__lowerCamelCase = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCamelCase_ ( UpperCamelCase ):
lowercase = field(default=UpperCamelCase , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
lowercase = field(
default=UpperCamelCase , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
lowercase = field(
default=UpperCamelCase , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
lowercase = field(
default=UpperCamelCase , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
lowercase = field(
default=UpperCamelCase , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
def snake_case__( self ) -> int:
_a : List[str] = super().to_dict()
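        # GenerationConfig values are not JSON-serializable as-is, so flatten them to plain dicts before returning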
for k, v in d.items():
if isinstance(lowercase , lowercase ):
_a : Tuple = v.to_dict()
return d | 701 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict) -> None:
    # Drop fairseq bookkeeping entries that have no counterpart in the HF model
    ignore_keys = [
        '''decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb) -> nn.Linear:
    # Build an output projection that shares its weights with the token embedding (no bias)
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path) -> XGLMForCausalLM:
    checkpoint = torch.load(checkpoint_path, map_location='''cpu''')
    args = Namespace(**checkpoint['''cfg''']['''model'''])
    state_dict = checkpoint['''model''']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['''decoder.embed_tokens.weight'''].shape[0]
    state_dict = {key.replace('''decoder''', '''model'''): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''gelu''' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
    model = XGLMForCausalLM(config)
    missing_info = model.load_state_dict(state_dict, strict=False)
    print(missing_info)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path) | 307 | 0 |
"""simple docstring"""
class MaxFenwickTree:
    """Fenwick-style structure for point updates and range maximum queries."""
    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size
    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)
    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1
    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value, current_left_border, index)
            index = self.get_next(index)
    def query(self, left: int, right: int) -> int:
        right -= 1  # because `right` is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
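# Illustrative usage of the reconstructed class above (the method names update/query are assumed from context):
# with this code, MaxFenwickTree(8) followed by update(3, 10) makes query(0, 4) return 10, i.e. the maximum of arr[0:4].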
| 196 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def UpperCAmelCase_ ( ):
'''simple docstring'''
_a : str = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'
_a : Any = Image.open(requests.get(A , stream=A ).raw ).convert('RGB' )
return image
def UpperCAmelCase_ ( A ):
'''simple docstring'''
_a : Optional[int] = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') )
# fmt: on
return rename_keys
def UpperCAmelCase_ ( A , A , A ):
'''simple docstring'''
_a : Dict = dct.pop(A )
_a : Tuple = val
def UpperCAmelCase_ ( A , A ):
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_a : Optional[Any] = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
_a : str = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
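        # (the key projection carries no bias here, so its slot in the fused qkv bias is filled with zeros)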
_a : Any = torch.cat((q_bias, torch.zeros_like(A , requires_grad=A ), v_bias) )
_a : Tuple = qkv_bias
def UpperCAmelCase_ ( A ):
'''simple docstring'''
_a : int = 3_6_4 if 'coco' in model_name else 2_2_4
_a : List[Any] = InstructBlipVisionConfig(image_size=A ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
_a : Optional[Any] = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_a : Tuple = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
_a : int = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf' , vocab_size=3_2_0_0_1 ).to_dict()
elif "vicuna-13b" in model_name:
_a : int = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf' , vocab_size=3_2_0_0_1 ).to_dict()
else:
raise ValueError('Model name not supported' )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
_a : Tuple = InstructBlipQFormerConfig(vocab_size=3_0_5_2_3 ).to_dict()
_a : Optional[Any] = InstructBlipConfig(vision_config=A , text_config=A , qformer_config=A )
return config, image_size
@torch.no_grad()
def UpperCAmelCase_ ( A , A=None , A=False ):
'''simple docstring'''
_a : Tuple = AutoTokenizer.from_pretrained('bert-base-uncased' , truncation_side='left' )
qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'} )
if "t5" in model_name:
_a : int = TaTokenizerFast.from_pretrained('google/flan-t5-xl' , truncation_side='left' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
_a : Optional[Any] = LlamaTokenizerFast.from_pretrained(
'huggyllama/llama-7b' , truncation_side='left' , bos_token='</s>' , unk_token='</s>' )
tokenizer.add_special_tokens({'pad_token': '[PAD]'} )
_a , _a : Tuple = get_blipa_config(A )
_a : Optional[Any] = InstructBlipForConditionalGeneration(A ).eval()
_a : Union[str, Any] = {
'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'),
'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'),
'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'),
'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'),
}
_a , _a : Dict = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
_a : int = 'cuda:1' if torch.cuda.is_available() else 'cpu'
_a : Any = 'cuda:2' if torch.cuda.is_available() else 'cpu'
_a , _a , _a : Optional[int] = load_model_and_preprocess(
name=A , model_type=A , is_eval=A , device=A )
original_model.eval()
print('Done!' )
# update state dict keys
_a : Tuple = original_model.state_dict()
_a : List[Any] = create_rename_keys(A )
for src, dest in rename_keys:
rename_key(A , A , A )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_a : Union[str, Any] = state_dict.pop(A )
if key.startswith('Qformer.bert' ):
_a : Union[str, Any] = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
_a : List[Any] = key.replace('self' , 'attention' )
if "llm_proj" in key:
_a : int = key.replace('llm_proj' , 'language_projection' )
if "t5_proj" in key:
_a : str = key.replace('t5_proj' , 'language_projection' )
if key.startswith('llm_model' ):
_a : int = key.replace('llm_model' , 'language_model' )
if key.startswith('t5' ):
_a : Tuple = key.replace('t5' , 'language' )
_a : Optional[Any] = val
# read in qv biases
read_in_q_v_bias(A , A )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(A , strict=A )
_a : Dict = load_demo_image()
_a : Any = 'What is unusual about this image?'
# create processor
_a : Any = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=A , image_std=A )
_a : Dict = InstructBlipProcessor(
image_processor=A , tokenizer=A , qformer_tokenizer=A , )
_a : str = processor(images=A , text=A , return_tensors='pt' ).to(A )
# make sure processor creates exact same pixel values
_a : str = vis_processors['eval'](A ).unsqueeze(0 ).to(A )
_a : Optional[int] = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , A )
original_model.to(A )
hf_model.to(A )
with torch.no_grad():
if "vicuna" in model_name:
_a : Any = original_model({'image': original_pixel_values, 'text_input': [prompt]} ).logits
_a : str = hf_model(**A ).logits
else:
_a : Optional[Any] = original_model(
{'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']} ).logits
_a : List[Any] = tokenizer('\n' , return_tensors='pt' ).input_ids.to(A )
_a : str = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_0_0 )
_a : List[str] = hf_model(**A , labels=A ).logits
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
_a : str = 1E-4 if 'vicuna' in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device ) , A , atol=A )
print('Looks ok!' )
print('Generating with original model...' )
_a : Optional[int] = original_model.generate({'image': original_pixel_values, 'prompt': prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('Generating with HF model...' )
_a : Dict = hf_model.generate(
**A , do_sample=A , num_beams=5 , max_length=2_5_6 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
_a : Tuple = 2
print('Original generation:' , A )
_a : Dict = processor.batch_decode(A , skip_special_tokens=A )
_a : Tuple = [text.strip() for text in output_text]
print('HF generation:' , A )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(A )
hf_model.save_pretrained(A )
if push_to_hub:
processor.push_to_hub(f'''Salesforce/{model_name}''' )
hf_model.push_to_hub(f'''Salesforce/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
UpperCAmelCase_ : List[str] = [
"instructblip-vicuna-7b",
"instructblip-vicuna-13b",
"instructblip-flan-t5-xl",
"instructblip-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="instructblip-flan-t5-xl",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 120 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_A = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
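# Note on the pattern above: at runtime the module object is replaced by a _LazyModule, so the heavy torch/TF imports
# only run when an attribute is first accessed; under TYPE_CHECKING the real imports run so type checkers see full symbols.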
| 704 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class _lowercase ( __UpperCAmelCase ):
lowercase_ = 'donut-swin'
lowercase_ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , UpperCAmelCase_=224 , UpperCAmelCase_=4 , UpperCAmelCase_=3 , UpperCAmelCase_=96 , UpperCAmelCase_=[2, 2, 6, 2] , UpperCAmelCase_=[3, 6, 12, 24] , UpperCAmelCase_=7 , UpperCAmelCase_=4.0 , UpperCAmelCase_=True , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_="gelu" , UpperCAmelCase_=False , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-5 , **UpperCAmelCase_ , ) -> Tuple:
super().__init__(**UpperCAmelCase_ )
lowerCamelCase : Optional[Any] = image_size
lowerCamelCase : List[Any] = patch_size
lowerCamelCase : int = num_channels
lowerCamelCase : str = embed_dim
lowerCamelCase : str = depths
lowerCamelCase : Optional[int] = len(UpperCAmelCase_ )
lowerCamelCase : Optional[Any] = num_heads
lowerCamelCase : List[Any] = window_size
lowerCamelCase : Dict = mlp_ratio
lowerCamelCase : Dict = qkv_bias
lowerCamelCase : int = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : List[Any] = drop_path_rate
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : Optional[Any] = use_absolute_embeddings
lowerCamelCase : int = layer_norm_eps
lowerCamelCase : int = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCamelCase : Tuple = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) )
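        # e.g. the default embed_dim=96 with depths=[2, 2, 6, 2] (four stages) yields hidden_size = 96 * 2**3 = 768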
| 133 | 0 |
'''simple docstring'''
def actual_power(a: int, b: int):
    # Exponentiation by squaring; int(b / 2) truncates toward zero, so the recursion also terminates for negative b
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
def power(a: int, b: int):
    # A negative exponent is handled by inverting the positive-exponent result
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
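# Illustrative values for the functions above (not part of the original file):
# power(2, 3) == actual_power(2, 3) == 8, while power(-2, -3) == 1 / actual_power(-2, -3) == 1 / (-8) == -0.125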
| 5 |
def different_signs(numa: int, numb: int) -> bool:
    # True iff the sign bit of numa ^ numb is set, i.e. the two integers have opposite signs
    return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
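# Why the XOR test works: in two's complement the sign is the top bit, and x ^ y has its top bit set exactly when
# x and y differ in sign, so (x ^ y) < 0 detects opposite signs without branching, e.g. (-5 ^ 3) == -8 but (5 ^ 3) == 6.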
| 354 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __lowerCAmelCase ( unittest.TestCase):
'''simple docstring'''
def _UpperCAmelCase ( self : Dict ):
A__ : Any =inspect.getfile(accelerate.test_utils )
A__ : Optional[int] =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
A__ : int =os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
A__ : int =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def _UpperCAmelCase ( self : Dict ):
print(F'''Found {torch.cuda.device_count()} devices.''' )
A__ : Tuple =["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
@require_multi_gpu
def _UpperCAmelCase ( self : List[str] ):
print(F'''Found {torch.cuda.device_count()} devices.''' )
A__ : int =["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(F'''Command: {cmd}''' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
@require_multi_gpu
def _UpperCAmelCase ( self : Optional[int] ):
A__ : List[Any] =["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
@require_multi_gpu
def _UpperCAmelCase ( self : List[str] ):
print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
A__ : int =["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
if __name__ == "__main__":
__A : Any = Accelerator()
__A : str = (accelerator.state.process_index + 2, 10)
__A : List[str] = torch.randint(0, 10, shape).to(accelerator.device)
__A : str = ""
__A : Dict = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
__A : Optional[Any] = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
__A : List[str] = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
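# What the assertions above check: pad_across_processes pads every process's tensor along dim 0 up to the largest size
# in the group (process i holds shape (i + 2, 10)), using zeros for the padding; with pad_first=True the zero padding is
# placed before the original rows instead of after them.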
| 713 | """simple docstring"""
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) with a*x + b*y == gcd(a, b).

    >>> extended_euclid(10, 6)
    (-1, 2)
    """
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(n_a: int, r_a: int, n_b: int, r_b: int) -> int:
    """Solve x ≡ r_a (mod n_a) and x ≡ r_b (mod n_b) for coprime moduli.

    >>> chinese_remainder_theorem(5, 1, 7, 3)
    31
    """
    (x, y) = extended_euclid(n_a, n_b)
    m = n_a * n_b
    n = r_b * x * n_a + r_a * y * n_b
    return (n % m + m) % m
def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n.

    >>> invert_modulo(2, 5)
    3
    """
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(n_a: int, r_a: int, n_b: int, r_b: int) -> int:
    """Same problem as above, solved via two modular inverses.

    >>> chinese_remainder_theorem2(5, 1, 7, 3)
    31
    """
    x, y = invert_modulo(n_a, n_b), invert_modulo(n_b, n_a)
    m = n_a * n_b
    n = r_b * x * n_a + r_a * y * n_b
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 595 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
a_ = False
@skip_mps
class UpperCAmelCase__ ( snake_case , snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ : Dict = StableDiffusionAttendAndExcitePipeline
lowerCAmelCase__ : Tuple = False
lowerCAmelCase__ : Dict = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase__ : int = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} )
lowerCAmelCase__ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def _UpperCAmelCase ( cls: Union[str, Any] ) -> Any:
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(__lowerCAmelCase )
@classmethod
def _UpperCAmelCase ( cls: Union[str, Any] ) -> Dict:
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(__lowerCAmelCase )
def _UpperCAmelCase ( self: Tuple ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__lowerCAmelCase , )
__UpperCAmelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
__UpperCAmelCase = CLIPTextModel(__lowerCAmelCase )
__UpperCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__UpperCAmelCase = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _UpperCAmelCase ( self: Any , __lowerCAmelCase: str , __lowerCAmelCase: Dict=0 ) -> Union[str, Any]:
'''simple docstring'''
if str(__lowerCAmelCase ).startswith("mps" ):
__UpperCAmelCase = torch.manual_seed(__lowerCAmelCase )
else:
__UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
        inputs = {
"prompt": "a cat and a frog",
"token_indices": [2, 5],
"generator": generator,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
"max_iter_to_alter": 2,
"thresholds": {0: 0.7},
}
return inputs
def _UpperCAmelCase ( self: List[Any] ) -> Any:
'''simple docstring'''
__UpperCAmelCase = "cpu"
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
__UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase )
__UpperCAmelCase = pipe(**__lowerCAmelCase ).images
__UpperCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
__UpperCAmelCase = np.array(
[0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] )
__UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCAmelCase , 1E-3 )
def _UpperCAmelCase ( self: Optional[int] ) -> List[str]:
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def _UpperCAmelCase ( self: Dict ) -> str:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _UpperCAmelCase ( self: str ) -> Union[str, Any]:
'''simple docstring'''
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def _UpperCAmelCase ( self: Tuple ) -> Union[str, Any]:
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def _UpperCAmelCase ( self: List[Any] ) -> Optional[int]:
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def _UpperCAmelCase ( self: Any ) -> int:
'''simple docstring'''
super().test_save_load_local(expected_max_difference=5E-4 )
def _UpperCAmelCase ( self: List[str] ) -> Tuple:
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def _UpperCAmelCase ( cls: str ) -> Optional[Any]:
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(__lowerCAmelCase )
@classmethod
def _UpperCAmelCase ( cls: List[str] ) -> Union[str, Any]:
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(__lowerCAmelCase )
def _UpperCAmelCase ( self: Optional[int] ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self: Tuple ) -> Any:
'''simple docstring'''
__UpperCAmelCase = torch.manual_seed(51 )
__UpperCAmelCase = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , safety_checker=__lowerCAmelCase , torch_dtype=torch.floataa )
pipe.to("cuda" )
__UpperCAmelCase = "a painting of an elephant with glasses"
__UpperCAmelCase = [5, 7]
__UpperCAmelCase = pipe(
prompt=__lowerCAmelCase , token_indices=__lowerCAmelCase , guidance_scale=7.5 , generator=__lowerCAmelCase , num_inference_steps=5 , max_iter_to_alter=5 , output_type="numpy" , ).images[0]
__UpperCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy" )
assert np.abs((expected_image - image).max() ) < 5E-1
| 221 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
"""configuration_clipseg""": [
"""CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPSegConfig""",
"""CLIPSegTextConfig""",
"""CLIPSegVisionConfig""",
],
"""processing_clipseg""": ["""CLIPSegProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPSegModel""",
"""CLIPSegPreTrainedModel""",
"""CLIPSegTextModel""",
"""CLIPSegVisionModel""",
"""CLIPSegForImageSegmentation""",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 221 | 1 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ =get_tests_dir("fixtures/test_sentencepiece.model")
lowerCAmelCase__ ={"target_lang": "fi", "source_lang": "en"}
lowerCAmelCase__ =">>zh<<"
lowerCAmelCase__ ="Helsinki-NLP/"
if is_torch_available():
lowerCAmelCase__ ="pt"
elif is_tf_available():
lowerCAmelCase__ ="tf"
else:
lowerCAmelCase__ ="jax"
@require_sentencepiece
class A__( __magic_name__ , unittest.TestCase ):
lowerCAmelCase = MarianTokenizer
lowerCAmelCase = False
lowerCAmelCase = True
def _a ( self : List[Any] ) -> int:
"""simple docstring"""
super().setUp()
__SCREAMING_SNAKE_CASE = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
__SCREAMING_SNAKE_CASE = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
__SCREAMING_SNAKE_CASE = Path(self.tmpdirname )
save_json(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab'''] )
save_json(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''tokenizer_config_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''source_spm'''] )
copyfile(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''target_spm'''] )
__SCREAMING_SNAKE_CASE = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _a ( self : str , **__SCREAMING_SNAKE_CASE : int ) -> MarianTokenizer:
"""simple docstring"""
return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> int:
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def _a ( self : Any ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = '''</s>'''
__SCREAMING_SNAKE_CASE = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def _a ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''</s>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(UpperCAmelCase__ ) , 9 )
def _a ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def _a ( self : str ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MarianTokenizer.from_pretrained(f"""{ORG_NAME}opus-mt-en-de""" )
__SCREAMING_SNAKE_CASE = en_de_tokenizer(['''I am a small frog'''] , return_tensors=UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(UpperCAmelCase__ , batch.input_ids[0] )
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = [x.name for x in Path(UpperCAmelCase__ ).glob('''*''' )]
self.assertIn('''source.spm''' , UpperCAmelCase__ )
MarianTokenizer.from_pretrained(UpperCAmelCase__ )
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = tok(
['''I am a small frog''' * 10_00, '''I am a small frog'''] , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual(batch.input_ids.shape , (2, 5_12) )
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = tok(['''I am a tiny frog''', '''I am a small frog'''] , padding=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def _a ( self : int ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {'''input_ids''': [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='''Helsinki-NLP/opus-mt-en-de''' , revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''' , decode_kwargs={'''use_source_tokenizer''': True} , )
def _a ( self : int ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''' )
__SCREAMING_SNAKE_CASE = '''Tämä on testi'''
__SCREAMING_SNAKE_CASE = '''This is a test'''
__SCREAMING_SNAKE_CASE = [76, 7, 20_47, 2]
__SCREAMING_SNAKE_CASE = [69, 12, 11, 9_40, 2]
__SCREAMING_SNAKE_CASE = tokenizer(UpperCAmelCase__ ).input_ids
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = tokenizer(text_target=UpperCAmelCase__ ).input_ids
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 721 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase__ =logging.get_logger(__name__)
class A__( __magic_name__ ):
lowerCAmelCase = ['''audio_values''', '''audio_mask''']
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any]=20_48 , __SCREAMING_SNAKE_CASE : str=1 , __SCREAMING_SNAKE_CASE : List[Any]=[16, 16] , __SCREAMING_SNAKE_CASE : Union[str, Any]=1_28 , __SCREAMING_SNAKE_CASE : int=4_41_00 , __SCREAMING_SNAKE_CASE : Union[str, Any]=86 , __SCREAMING_SNAKE_CASE : str=20_48 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> Any:
"""simple docstring"""
super().__init__(
feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = spectrogram_length
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = feature_size // self.patch_size[1]
__SCREAMING_SNAKE_CASE = n_fft
__SCREAMING_SNAKE_CASE = sampling_rate // hop_length_to_sampling_rate
__SCREAMING_SNAKE_CASE = sampling_rate
__SCREAMING_SNAKE_CASE = padding_value
__SCREAMING_SNAKE_CASE = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm='''slaney''' , mel_scale='''slaney''' , ).T
def _a ( self : str , __SCREAMING_SNAKE_CASE : np.array ) -> np.ndarray:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = spectrogram(
__SCREAMING_SNAKE_CASE , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , )
__SCREAMING_SNAKE_CASE = log_spec[:, :-1]
__SCREAMING_SNAKE_CASE = log_spec - 20.0
__SCREAMING_SNAKE_CASE = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : str , __SCREAMING_SNAKE_CASE : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = True , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , **__SCREAMING_SNAKE_CASE : Tuple , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'''This feature extractor is set to support sampling rate'''
f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
f""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
__SCREAMING_SNAKE_CASE = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
__SCREAMING_SNAKE_CASE = is_batched_numpy or (
isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__SCREAMING_SNAKE_CASE = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
__SCREAMING_SNAKE_CASE = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__SCREAMING_SNAKE_CASE = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__SCREAMING_SNAKE_CASE = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__SCREAMING_SNAKE_CASE = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__SCREAMING_SNAKE_CASE = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__SCREAMING_SNAKE_CASE = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__SCREAMING_SNAKE_CASE = np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa )
# convert into correct format for padding
__SCREAMING_SNAKE_CASE = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__SCREAMING_SNAKE_CASE = np.ones([len(__SCREAMING_SNAKE_CASE ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__SCREAMING_SNAKE_CASE = padded_audio_features * self.padding_value
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
__SCREAMING_SNAKE_CASE = audio_features[i]
__SCREAMING_SNAKE_CASE = feature
# return as BatchFeature
if return_attention_mask:
__SCREAMING_SNAKE_CASE = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
else:
__SCREAMING_SNAKE_CASE = {'''audio_values''': padded_audio_features}
__SCREAMING_SNAKE_CASE = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
return encoded_inputs
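# Illustrative mask arithmetic for the extractor above (values assumed, not from the original): with feature_size=128
# and patch_size=[16, 16], freq_len = 128 // 16 = 8; a clip whose log-mel spectrogram has 100 frames contributes
# ceil(100 / 16) * 8 = 56 ones to audio_mask, and every clip is padded with the padding value up to the longest clip.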
| 690 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : str = ["PoolFormerFeatureExtractor"]
SCREAMING_SNAKE_CASE__ : List[Any] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : str = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 298 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_snake_case : List[Any] = "\\n Text data.\n Second line of data."
_snake_case : Tuple = "file"
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : Tuple = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
__snake_case : Optional[Any] = bytes(__lowerCamelCase , "utf-8" )
with zstd.open(__lowerCamelCase , "wb" ) as f:
f.write(__lowerCamelCase )
return path
@pytest.fixture
def lowerCAmelCase_ ( __lowerCamelCase ):
with open(os.path.join(tmpfs.local_root_dir , __lowerCamelCase ) , "w" ) as f:
f.write(__lowerCamelCase )
return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__snake_case : Optional[int] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
__snake_case : str = input_paths[compression_format]
__snake_case : Optional[Any] = tmp_path / "cache"
__snake_case : Optional[int] = DownloadConfig(cache_dir=__lowerCamelCase , extract_compressed_file=__lowerCamelCase )
__snake_case : Union[str, Any] = cached_path(__lowerCamelCase , download_config=__lowerCamelCase )
with open(__lowerCamelCase ) as f:
__snake_case : Dict = f.read()
with open(__lowerCamelCase ) as f:
__snake_case : Tuple = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__snake_case : Tuple = "custom_cache"
__snake_case : List[str] = "custom_extracted_dir"
__snake_case : Any = tmp_path / "custom_extracted_path"
if default_extracted:
__snake_case : List[Any] = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , __lowerCamelCase )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(__lowerCamelCase ) )
__snake_case : Optional[Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
__snake_case : Optional[int] = xz_file
__snake_case : Optional[int] = (
DownloadConfig(extract_compressed_file=__lowerCamelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=__lowerCamelCase )
)
__snake_case : str = cached_path(__lowerCamelCase , download_config=__lowerCamelCase )
assert Path(__lowerCamelCase ).parent.parts[-2:] == expected
def lowerCAmelCase_ ( __lowerCamelCase ):
# absolute path
__snake_case : Optional[Any] = str(Path(__lowerCamelCase ).resolve() )
assert cached_path(__lowerCamelCase ) == text_file
# relative path
__snake_case : Any = str(Path(__lowerCamelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(__lowerCamelCase ) == text_file
def lowerCAmelCase_ ( __lowerCamelCase ):
# absolute path
__snake_case : List[Any] = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(__lowerCamelCase ):
cached_path(__lowerCamelCase )
# relative path
__snake_case : Optional[int] = "./__missing_file__.txt"
with pytest.raises(__lowerCamelCase ):
cached_path(__lowerCamelCase )
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : str = get_from_cache(F'tmp://{tmpfs_file}' )
with open(__lowerCamelCase ) as f:
__snake_case : Union[str, Any] = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , __lowerCamelCase )
def lowerCAmelCase_ ( ):
with pytest.raises(__lowerCamelCase ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , __lowerCamelCase )
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : List[Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(__lowerCamelCase ):
http_get("https://huggingface.co" , temp_file=__lowerCamelCase )
with pytest.raises(__lowerCamelCase ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , __lowerCamelCase )
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : List[str] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(__lowerCamelCase ):
ftp_get("ftp://huggingface.co" , temp_file=__lowerCamelCase )
with pytest.raises(__lowerCamelCase ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , __lowerCamelCase )
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : Tuple = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(__lowerCamelCase ):
fsspec_get("s3://huggingface.co" , temp_file=__lowerCamelCase )
with pytest.raises(__lowerCamelCase ):
fsspec_head("s3://huggingface.co" )
| 81 | 0 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def snake_case ( snake_case__ :Optional[int] , snake_case__ :Optional[Any]) -> List[Any]:
_A = []
for i in range(encoder_config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight'''))
rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias'''))
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight'''))
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias'''))
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight'''))
rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias'''))
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight'''))
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias'''))
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight'''))
rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias'''))
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
])
return rename_keys
def snake_case ( snake_case__ :List[Any] , snake_case__ :List[Any]) -> List[Any]:
for i in range(encoder_config.num_hidden_layers):
# queries, keys and values (only weights, no biases)
_A = state_dict.pop(F'''encoder.deit.blocks.{i}.attn.qkv.weight''')
_A = in_proj_weight[
: encoder_config.hidden_size, :
]
_A = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
_A = in_proj_weight[
-encoder_config.hidden_size :, :
]
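# A small self-contained illustration of the slicing performed above: the fused qkv projection has
# shape (3 * hidden_size, hidden_size) and is cut row-wise into the query, key and value weights.
# The hidden size of 4 used here is purely illustrative.
_demo_hidden = 4
_demo_qkv = torch.randn(3 * _demo_hidden, _demo_hidden)
_demo_q = _demo_qkv[:_demo_hidden, :]
_demo_k = _demo_qkv[_demo_hidden : _demo_hidden * 2, :]
_demo_v = _demo_qkv[-_demo_hidden:, :]
assert _demo_q.shape == _demo_k.shape == _demo_v.shape == (_demo_hidden, _demo_hidden)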
def snake_case ( snake_case__ :Optional[Any] , snake_case__ :Optional[Any] , snake_case__ :Any) -> Dict:
_A = dct.pop(snake_case__)
_A = val
def snake_case ( snake_case__ :Optional[int]) -> int:
if "handwritten" in checkpoint_url:
_A = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
_A = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
_A = Image.open(requests.get(snake_case__ , stream=snake_case__).raw).convert("""RGB""")
return im
@torch.no_grad()
def snake_case ( snake_case__ :List[Any] , snake_case__ :Dict) -> List[Any]:
_A = ViTConfig(image_size=384 , qkv_bias=snake_case__)
_A = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
_A = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
_A = 1_024
_A = 4_096
_A = 24
_A = 16
_A = 1_024
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""")
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
_A = False
_A = """relu"""
_A = 1_024
_A = True
_A = False
_A = False
# load HuggingFace model
_A = ViTModel(snake_case__ , add_pooling_layer=snake_case__)
_A = TrOCRForCausalLM(snake_case__)
_A = VisionEncoderDecoderModel(encoder=snake_case__ , decoder=snake_case__)
model.eval()
# load state_dict of original model, rename some keys
_A = torch.hub.load_state_dict_from_url(snake_case__ , map_location="""cpu""" , check_hash=snake_case__)["""model"""]
_A = create_rename_keys(snake_case__ , snake_case__)
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__)
read_in_q_k_v(snake_case__ , snake_case__)
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
_A = state_dict.pop(snake_case__)
if key.startswith("""decoder""") and "output_projection" not in key:
_A = val
else:
_A = val
# load state dict
model.load_state_dict(snake_case__)
# Check outputs on an image
_A = ViTImageProcessor(size=encoder_config.image_size)
_A = RobertaTokenizer.from_pretrained("""roberta-large""")
_A = TrOCRProcessor(snake_case__ , snake_case__)
_A = processor(images=prepare_img(snake_case__) , return_tensors="""pt""").pixel_values
# verify logits
_A = torch.tensor([[model.config.decoder.decoder_start_token_id]])
_A = model(pixel_values=snake_case__ , decoder_input_ids=snake_case__)
_A = outputs.logits
_A = torch.Size([1, 1, 50_265])
if "trocr-base-handwritten" in checkpoint_url:
_A = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311])
elif "trocr-large-handwritten" in checkpoint_url:
_A = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170])
elif "trocr-base-printed" in checkpoint_url:
_A = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210])
elif "trocr-large-printed" in checkpoint_url:
_A = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535])
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , snake_case__ , atol=1E-3), "First elements of logits not as expected"
Path(snake_case__).mkdir(exist_ok=snake_case__)
print(F'''Saving model to {pytorch_dump_folder_path}''')
model.save_pretrained(snake_case__)
print(F'''Saving processor to {pytorch_dump_folder_path}''')
processor.save_pretrained(snake_case__)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
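# A usage sketch for the converted checkpoint; the folder name is only an example and should match
# the --pytorch_dump_folder_path passed to the script above.
def _demo_run_trocr(image, model_dir="./trocr-base-handwritten"):
    # Load the converted processor + model and run autoregressive OCR decoding on a PIL image.
    processor = TrOCRProcessor.from_pretrained(model_dir)
    model = VisionEncoderDecoderModel.from_pretrained(model_dir)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    generated_ids = model.generate(pixel_values)
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]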
| 83 | import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_SCREAMING_SNAKE_CASE = datasets.logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
_SCREAMING_SNAKE_CASE = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
_SCREAMING_SNAKE_CASE = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def snake_case ( snake_case__ :Optional[Any] , snake_case__ :str , snake_case__ :List[str]=False , snake_case__ :Dict=False , snake_case__ :Any=True , snake_case__ :List[str]=False , snake_case__ :Optional[Any]="dummy_doc") -> List[Any]:
_A = {doc: key_lines}
_A = {doc: sys_lines}
_A = {}
_A = 0
_A = 0
_A = 0
_A = 0
_A = 0
_A = 0
_A , _A = reader.get_doc_mentions(snake_case__ , key_doc_lines[doc] , snake_case__)
key_singletons_num += singletons_num
if NP_only or min_span:
_A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__)
_A , _A = reader.get_doc_mentions(snake_case__ , sys_doc_lines[doc] , snake_case__)
sys_singletons_num += singletons_num
if NP_only or min_span:
_A = reader.set_annotated_parse_trees(snake_case__ , key_doc_lines[doc] , snake_case__ , snake_case__)
if remove_nested:
_A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__)
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
_A , _A = reader.remove_nested_coref_mentions(snake_case__ , snake_case__)
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
_A = reader.get_mention_assignments(snake_case__ , snake_case__)
_A = reader.get_mention_assignments(snake_case__ , snake_case__)
_A = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''')
logger.info(
"""Number of resulting singleton clusters in the key """
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''')
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
"""files, respectively""")
return doc_coref_infos
def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Dict , snake_case__ :Tuple , snake_case__ :Dict , snake_case__ :Tuple) -> int:
_A = get_coref_infos(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
_A = {}
_A = 0
_A = 0
for name, metric in metrics:
_A , _A , _A = evaluator.evaluate_documents(snake_case__ , snake_case__ , beta=1)
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa})
logger.info(
name.ljust(10) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , )
if conll_subparts_num == 3:
_A = (conll / 3) * 100
logger.info(F'''CoNLL score: {conll:.2f}''')
output_scores.update({"""conll_score""": conll})
return output_scores
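# The "conll_score" reported above is simply the unweighted mean of the MUC, B-cubed and CEAFe F1
# values, scaled to a percentage. A tiny illustration with made-up F1 values:
_demo_f1s = {"muc": 0.70, "bcub": 0.60, "ceafe": 0.65}
_demo_conll_score = sum(_demo_f1s.values()) / 3 * 100  # -> 65.0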
def snake_case ( snake_case__ :Union[str, Any]) -> List[Any]:
_A = False
for line in key_lines:
if not line.startswith("""#"""):
if len(line.split()) > 6:
_A = line.split()[5]
if not parse_col == "-":
_A = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Union[str, Any]:
_A = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
_A = util.check_gold_parse_annotation(lowerCAmelCase_ )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
_A = evaluate(
key_lines=lowerCAmelCase_ , sys_lines=lowerCAmelCase_ , metrics=lowerCAmelCase_ , NP_only=lowerCAmelCase_ , remove_nested=lowerCAmelCase_ , keep_singletons=lowerCAmelCase_ , min_span=lowerCAmelCase_ , )
return score
| 83 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
def __UpperCAmelCase ( snake_case_ : Tuple ) -> Tuple:
"""simple docstring"""
_lowerCAmelCase = torch.load(snake_case_ , map_location="""cpu""" )
if "model" in sd.keys():
_lowerCAmelCase = torch.load(snake_case_ , map_location="""cpu""" )["""model"""]
# pop unnecessary weights
_lowerCAmelCase = [
"""decoder.version""",
"""decoder.output_projection.weight""",
]
for key in keys_to_delete:
if key in sd:
sd.pop(snake_case_ )
_lowerCAmelCase = {
"""decoder.project_in_dim.weight""": """decoder.project_in.weight""",
"""decoder.project_out_dim.weight""": """decoder.project_out.weight""",
"""decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_lowerCAmelCase = sd.pop(snake_case_ )
_lowerCAmelCase = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_lowerCAmelCase = sd[key]
# We split QKV in separate Q,K,V
_lowerCAmelCase = key.replace(""".qkv_proj.""" , """.q_proj.""" )
_lowerCAmelCase = key.replace(""".qkv_proj.""" , """.k_proj.""" )
_lowerCAmelCase = key.replace(""".qkv_proj.""" , """.v_proj.""" )
_lowerCAmelCase = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` stores the QKV weight separated as K, V, Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = torch.split(snake_case_ , depth // 3 , dim=0 )
_lowerCAmelCase = q
_lowerCAmelCase = k
_lowerCAmelCase = v
del sd[key]
return sd
@torch.no_grad()
def __UpperCAmelCase ( snake_case_ : Optional[Any] , snake_case_ : Dict , snake_case_ : int=None ) -> str:
"""simple docstring"""
_lowerCAmelCase = load_checkpoint(snake_case_ )
if config is not None:
_lowerCAmelCase = OPTConfig.from_pretrained(snake_case_ )
else:
_lowerCAmelCase = OPTConfig()
_lowerCAmelCase = OPTModel(snake_case_ ).half().eval()
model.load_state_dict(snake_case_ )
# Check results
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config) | 156 |
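# After conversion, the dumped folder can be reloaded with the regular OPT classes. A sketch; the
# folder name is only an example and should match the --pytorch_dump_folder_path used above.
def _demo_load_converted_opt(model_dir="./opt-converted"):
    from transformers import OPTModel
    return OPTModel.from_pretrained(model_dir)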
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''vocab_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''',
},
'''merges_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''Salesforce/codegen-350M-mono''': (
'''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'''
),
},
}
SCREAMING_SNAKE_CASE : Optional[int] = {
'''Salesforce/codegen-350M-mono''': 2_0_4_8,
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['input_ids', 'attention_mask']
__UpperCamelCase = CodeGenTokenizer
def __init__(self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<|endoftext|>" , lowerCamelCase="<|endoftext|>" , lowerCamelCase="<|endoftext|>" , lowerCamelCase=False , **lowerCamelCase , ):
'''simple docstring'''
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , )
if kwargs.pop("""add_bos_token""" , lowerCamelCase ):
_lowerCAmelCase = kwargs.pop("""name_or_path""" , """""" )
raise ValueError(
"""Currenty GPT2's fast tokenizer does NOT support adding a BOS token."""
"""Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"""
f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
"""This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."""
""" so that the fast tokenizer works correctly.""" )
_lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , lowerCamelCase ) != add_prefix_space:
_lowerCAmelCase = getattr(lowerCamelCase , pre_tok_state.pop("""type""" ) )
_lowerCAmelCase = add_prefix_space
_lowerCAmelCase = pre_tok_class(**lowerCamelCase )
_lowerCAmelCase = add_prefix_space
def A__ (self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = kwargs.get("""is_split_into_words""" , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def A__ (self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = kwargs.get("""is_split_into_words""" , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def A__ (self , lowerCamelCase , lowerCamelCase = None ):
'''simple docstring'''
_lowerCAmelCase = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def A__ (self , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , **lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase = super().decode(
token_ids=lowerCamelCase , skip_special_tokens=lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase , **lowerCamelCase , )
if truncate_before_pattern is not None and len(lowerCamelCase ) > 0:
_lowerCAmelCase = self.truncate(lowerCamelCase , lowerCamelCase )
return decoded_text
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
def find_re(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = pattern.search(lowerCamelCase , lowerCamelCase )
return m.start() if m else -1
_lowerCAmelCase = [re.compile(lowerCamelCase , re.MULTILINE ) for pattern in truncate_before_pattern]
_lowerCAmelCase = list(re.finditer("""^print""" , lowerCamelCase , re.MULTILINE ) )
if len(lowerCamelCase ) > 1:
_lowerCAmelCase = completion[: prints[1].start()]
_lowerCAmelCase = list(re.finditer("""^def""" , lowerCamelCase , re.MULTILINE ) )
if len(lowerCamelCase ) > 1:
_lowerCAmelCase = completion[: defs[1].start()]
_lowerCAmelCase = 0
_lowerCAmelCase = [
pos for pos in [find_re(lowerCamelCase , lowerCamelCase , lowerCamelCase ) for terminal in terminals] if pos != -1
]
if len(lowerCamelCase ) > 0:
return completion[: min(lowerCamelCase )]
else:
return completion | 156 | 1 |
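# The decode/truncate logic above lets a generated completion be cut at the first "natural" stopping
# point (a second `print`, a second `def`, or any caller-supplied regex). A usage sketch; the
# checkpoint name comes from the pretrained maps above and the patterns are only an example:
from transformers import AutoTokenizer

_demo_tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
_demo_ids = _demo_tokenizer("def hello_world():").input_ids
_demo_text = _demo_tokenizer.decode(_demo_ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])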
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class _lowerCamelCase :
def __init__( self , lowerCAmelCase = "cpu" , lowerCAmelCase = "openai/clip-vit-large-patch14" ) -> None:
SCREAMING_SNAKE_CASE__: str= device
SCREAMING_SNAKE_CASE__: Union[str, Any]= CLIPTokenizerFast.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Any= [0.48145466, 0.4578275, 0.40821073]
SCREAMING_SNAKE_CASE__: Any= [0.26862954, 0.26130258, 0.27577711]
SCREAMING_SNAKE_CASE__: Optional[int]= torchvision.transforms.Normalize(self.image_mean , self.image_std )
SCREAMING_SNAKE_CASE__: str= torchvision.transforms.Resize(224 )
SCREAMING_SNAKE_CASE__: Optional[int]= torchvision.transforms.CenterCrop(224 )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> Any:
SCREAMING_SNAKE_CASE__: str= self.resize(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= self.center_crop(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= self.normalize(lowerCAmelCase )
return images
def __call__( self , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.tokenizer(text=lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= self.preprocess_img(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class _lowerCamelCase ( nn.Module ):
def __init__( self , lowerCAmelCase=10 , lowerCAmelCase=0.01 , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase="image" , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , ) -> None:
super().__init__()
SCREAMING_SNAKE_CASE__: Optional[Any]= None
SCREAMING_SNAKE_CASE__: Any= device if device else get_device()
if vqgan:
SCREAMING_SNAKE_CASE__: Tuple= vqgan
else:
SCREAMING_SNAKE_CASE__: Optional[Any]= load_vqgan(self.device , conf_path=lowerCAmelCase , ckpt_path=lowerCAmelCase )
self.vqgan.eval()
if clip:
SCREAMING_SNAKE_CASE__: List[str]= clip
else:
SCREAMING_SNAKE_CASE__: Optional[Any]= CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
SCREAMING_SNAKE_CASE__: Optional[int]= ProcessorGradientFlow(device=self.device )
SCREAMING_SNAKE_CASE__: Union[str, Any]= iterations
SCREAMING_SNAKE_CASE__: str= lr
SCREAMING_SNAKE_CASE__: List[Any]= log
SCREAMING_SNAKE_CASE__: List[str]= make_grid
SCREAMING_SNAKE_CASE__: List[Any]= return_val
SCREAMING_SNAKE_CASE__: Union[str, Any]= quantize
SCREAMING_SNAKE_CASE__: List[Any]= self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=5 , lowerCAmelCase=True ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: Union[str, Any]= []
if output_path is None:
SCREAMING_SNAKE_CASE__: Dict= "./animation.gif"
if input_path is None:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.save_path
SCREAMING_SNAKE_CASE__: Union[str, Any]= sorted(glob(input_path + '''/*''' ) )
if not len(lowerCAmelCase ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(lowerCAmelCase ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
SCREAMING_SNAKE_CASE__: List[str]= total_duration / len(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: str= [frame_duration] * len(lowerCAmelCase )
if extend_frames:
SCREAMING_SNAKE_CASE__: Dict= 1.5
SCREAMING_SNAKE_CASE__: List[str]= 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(lowerCAmelCase ) )
imageio.mimsave(lowerCAmelCase , lowerCAmelCase , duration=lowerCAmelCase )
print(f'gif saved to {output_path}' )
def UpperCamelCase_ ( self , lowerCAmelCase=None , lowerCAmelCase=None ) -> str:
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
SCREAMING_SNAKE_CASE__: Dict= preprocess(Image.open(lowerCAmelCase ) , target_image_size=256 ).to(self.device )
SCREAMING_SNAKE_CASE__: Tuple= preprocess_vqgan(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= self.vqgan.encode(lowerCAmelCase )
return z
def UpperCamelCase_ ( self , lowerCAmelCase ) -> List[Any]:
SCREAMING_SNAKE_CASE__: Tuple= self.latent.detach().requires_grad_()
SCREAMING_SNAKE_CASE__: Dict= base_latent + transform_vector
if self.quantize:
SCREAMING_SNAKE_CASE__: str= self.vqgan.quantize(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__: int= trans_latent
return self.vqgan.decode(lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: Tuple= self.clip_preprocessor(text=lowerCAmelCase , images=lowerCAmelCase , return_tensors='''pt''' , padding=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= self.clip(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= clip_outputs.logits_per_image
if weights is not None:
SCREAMING_SNAKE_CASE__: str= similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> int:
SCREAMING_SNAKE_CASE__: List[str]= self._get_clip_similarity(pos_prompts['''prompts'''] , lowerCAmelCase , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
SCREAMING_SNAKE_CASE__: Tuple= self._get_clip_similarity(neg_prompts['''prompts'''] , lowerCAmelCase , weights=neg_prompts['''weights'''] )
else:
SCREAMING_SNAKE_CASE__: str= torch.tensor([1] , device=self.device )
SCREAMING_SNAKE_CASE__: Optional[int]= -torch.log(lowerCAmelCase ) + torch.log(lowerCAmelCase )
return loss
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Any:
SCREAMING_SNAKE_CASE__: int= torch.randn_like(self.latent , requires_grad=lowerCAmelCase , device=self.device )
SCREAMING_SNAKE_CASE__: Any= torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
SCREAMING_SNAKE_CASE__: Tuple= self._add_vector(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Union[str, Any]= loop_post_process(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= self._get_CLIP_loss(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
print('''CLIP loss''' , lowerCAmelCase )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=lowerCAmelCase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> List[str]:
wandb.init(reinit=lowerCAmelCase , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
SCREAMING_SNAKE_CASE__: List[str]= Image.open(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= image.resize((256, 256) )
wandb.log('''Original Image''' , wandb.Image(lowerCAmelCase ) )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> int:
if not prompts:
return []
SCREAMING_SNAKE_CASE__: int= []
SCREAMING_SNAKE_CASE__: Any= []
if isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE__: int= [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(lowerCAmelCase , (tuple, list) ):
SCREAMING_SNAKE_CASE__: List[Any]= prompt[0]
SCREAMING_SNAKE_CASE__: List[Any]= float(prompt[1] )
elif ":" in prompt:
SCREAMING_SNAKE_CASE__: str= prompt.split(''':''' )
SCREAMING_SNAKE_CASE__: int= float(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__: int= prompt
SCREAMING_SNAKE_CASE__: Tuple= 1.0
processed_prompts.append(lowerCAmelCase )
weights.append(lowerCAmelCase )
return {
"prompts": processed_prompts,
"weights": torch.tensor(lowerCAmelCase , device=self.device ),
}
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=None , ) -> Union[str, Any]:
if image_path:
SCREAMING_SNAKE_CASE__: Tuple= self._get_latent(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__: str= torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
assert pos_prompts, "You must provide at least one positive prompt."
SCREAMING_SNAKE_CASE__: Dict= self.process_prompts(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= self.process_prompts(lowerCAmelCase )
if save_final and save_path is None:
SCREAMING_SNAKE_CASE__: Any= os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(lowerCAmelCase ):
os.makedirs(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__: str= save_path + "_" + get_timestamp()
os.makedirs(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Any= save_path
SCREAMING_SNAKE_CASE__: List[Any]= self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(lowerCAmelCase ) )
SCREAMING_SNAKE_CASE__: int= loop_post_process(lowerCAmelCase )
for iter, transformed_img in enumerate(self._optimize_CLIP(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) ):
if show_intermediate:
show_pil(lowerCAmelCase )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({'''Image''': wandb.Image(lowerCAmelCase )} )
if show_final:
show_pil(lowerCAmelCase )
if save_final:
transformed_img.save(os.path.join(self.save_path , f'iter_{iter:03d}_final.png' ) )
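# The class above performs CLIP-guided editing of a VQGAN latent: a small offset vector is optimised
# with Adam so the decoded image moves towards the positive prompts and away from the negative ones
# (loss = -log(pos_similarity) + log(neg_similarity)). A stripped-down sketch of that optimisation
# pattern, where `score_fn` is a stand-in for the CLIP similarity rather than the real model:
def _demo_optimize_offset(latent, score_fn, steps=10, lr=0.01):
    offset = torch.randn_like(latent, requires_grad=True)
    optim = torch.optim.Adam([offset], lr=lr)
    for _ in range(steps):
        optim.zero_grad()
        loss = -score_fn(latent + offset)
        loss.backward()
        optim.step()
    return (latent + offset).detach()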
| 713 | import os
def solution( filename : str = "matrix.txt" ) -> int:
    with open(os.path.join(os.path.dirname(__file__ ) , filename ) ) as in_file:
        data = in_file.read()
    grid = [[int(cell ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
    dp = [[0 for cell in row] for row in grid]
    n = len(grid[0] )
    dp = [[0 for i in range(n )] for j in range(n )]
    dp[0][0] = grid[0][0]
    for i in range(1 , n ):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1 , n ):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1 , n ):
        for j in range(1 , n ):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
    return dp[-1][-1]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 107 | 0 |
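# Because moves in the grid above are only ever right or down, dp[i][j] holds the cheapest path sum
# that reaches cell (i, j). A tiny in-memory check on a 2x2 grid (no matrix.txt needed): the optimal
# path of [[1, 2], [5, 1]] goes right then down, for a total of 1 + 2 + 1 = 4.
_demo_grid = [[1, 2], [5, 1]]
_demo_right_then_down = _demo_grid[0][0] + _demo_grid[0][1] + _demo_grid[1][1]  # 4
_demo_down_then_right = _demo_grid[0][0] + _demo_grid[1][0] + _demo_grid[1][1]  # 7
assert min(_demo_right_then_down, _demo_down_then_right) == 4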
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class __a ( __a ):
'''simple docstring'''
_lowerCamelCase : jnp.ndarray
@flax_register_to_config
class __a ( nn.Module , __a , __a ):
'''simple docstring'''
_lowerCamelCase : int = 32
_lowerCamelCase : int = 4
_lowerCamelCase : int = 4
_lowerCamelCase : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_lowerCamelCase : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
_lowerCamelCase : Union[bool, Tuple[bool]] = False
_lowerCamelCase : Tuple[int] = (3_20, 6_40, 12_80, 12_80)
_lowerCamelCase : int = 2
_lowerCamelCase : Union[int, Tuple[int]] = 8
_lowerCamelCase : Optional[Union[int, Tuple[int]]] = None
_lowerCamelCase : int = 12_80
_lowerCamelCase : float = 0.0
_lowerCamelCase : bool = False
_lowerCamelCase : jnp.dtype = jnp.floataa
_lowerCamelCase : bool = True
_lowerCamelCase : int = 0
_lowerCamelCase : bool = False
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> FrozenDict:
'''simple docstring'''
# init input tensors
__lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
__lowercase = jnp.zeros(_lowerCamelCase , dtype=jnp.floataa )
__lowercase = jnp.ones((1,) , dtype=jnp.intaa )
__lowercase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__lowercase , __lowercase = jax.random.split(_lowerCamelCase )
__lowercase = {"params": params_rng, "dropout": dropout_rng}
return self.init(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )["params"]
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase = self.block_out_channels
__lowercase = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__lowercase = self.num_attention_heads or self.attention_head_dim
# input
__lowercase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__lowercase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__lowercase = FlaxTimestepEmbedding(_lowerCamelCase , dtype=self.dtype )
__lowercase = self.only_cross_attention
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__lowercase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__lowercase = (num_attention_heads,) * len(self.down_block_types )
# down
__lowercase = []
__lowercase = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
__lowercase = output_channel
__lowercase = block_out_channels[i]
__lowercase = i == len(_lowerCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__lowercase = FlaxCrossAttnDownBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__lowercase = FlaxDownBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_lowerCamelCase )
__lowercase = down_blocks
# mid
__lowercase = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
__lowercase = []
__lowercase = list(reversed(_lowerCamelCase ) )
__lowercase = list(reversed(_lowerCamelCase ) )
__lowercase = list(reversed(_lowerCamelCase ) )
__lowercase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
__lowercase = output_channel
__lowercase = reversed_block_out_channels[i]
__lowercase = reversed_block_out_channels[min(i + 1 , len(_lowerCamelCase ) - 1 )]
__lowercase = i == len(_lowerCamelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
__lowercase = FlaxCrossAttnUpBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__lowercase = FlaxUpBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(_lowerCamelCase )
__lowercase = output_channel
__lowercase = up_blocks
# out
__lowercase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__lowercase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase = True , _lowerCamelCase = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
'''simple docstring'''
# 1. time
if not isinstance(_lowerCamelCase , jnp.ndarray ):
__lowercase = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
__lowercase = timesteps.astype(dtype=jnp.floataa )
__lowercase = jnp.expand_dims(_lowerCamelCase , 0 )
__lowercase = self.time_proj(_lowerCamelCase )
__lowercase = self.time_embedding(_lowerCamelCase )
# 2. pre-process
__lowercase = jnp.transpose(_lowerCamelCase , (0, 2, 3, 1) )
__lowercase = self.conv_in(_lowerCamelCase )
# 3. down
__lowercase = (sample,)
for down_block in self.down_blocks:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__lowercase , __lowercase = down_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train )
else:
__lowercase , __lowercase = down_block(_lowerCamelCase , _lowerCamelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
__lowercase = ()
for down_block_res_sample, down_block_additional_residual in zip(
_lowerCamelCase , _lowerCamelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
__lowercase = new_down_block_res_samples
# 4. mid
__lowercase = self.mid_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
__lowercase = down_block_res_samples[-(self.layers_per_block + 1) :]
__lowercase = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__lowercase = up_block(
_lowerCamelCase , temb=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train , )
else:
__lowercase = up_block(_lowerCamelCase , temb=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train )
# 6. post-process
__lowercase = self.conv_norm_out(_lowerCamelCase )
__lowercase = nn.silu(_lowerCamelCase )
__lowercase = self.conv_out(_lowerCamelCase )
__lowercase = jnp.transpose(_lowerCamelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=_lowerCamelCase )
| 118 |
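# The Flax UNet above follows the usual flax.linen pattern: `init_weights` builds dummy inputs and
# returns the parameter dict, after which the module is called functionally through `apply`. A small
# sketch using the public diffusers class this definition corresponds to; the tiny configuration
# below is an assumption chosen only to keep the example cheap to run:
import jax
import jax.numpy as jnp
from diffusers import FlaxUNet2DConditionModel

_demo_unet = FlaxUNet2DConditionModel(
    sample_size=16,
    in_channels=4,
    out_channels=4,
    down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
    up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
    block_out_channels=(32, 64),
    layers_per_block=1,
    cross_attention_dim=32,
    attention_head_dim=2,
)
_demo_params = _demo_unet.init_weights(jax.random.PRNGKey(0))
_demo_sample = jnp.zeros((1, 4, 16, 16))
_demo_timesteps = jnp.ones((1,), dtype=jnp.int32)
_demo_context = jnp.zeros((1, 1, 32))
_demo_out = _demo_unet.apply({"params": _demo_params}, _demo_sample, _demo_timesteps, _demo_context)
print(_demo_out.sample.shape)  # -> (1, 4, 16, 16)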
"""simple docstring"""
def solution( n : int = 1_0 ) -> str:
    if not isinstance(n , int ) or n < 0:
        raise ValueError("Invalid input" )
    modulus = 1_0**n
    number = 2_8_4_3_3 * (pow(2 , 7_8_3_0_4_5_7 , modulus )) + 1
    return str(number % modulus )
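# Note: pow(2, 7_830_457, modulus) uses Python's built-in fast modular exponentiation, so only the
# last n digits are ever materialised; computing 2**7_830_457 in full would be roughly 2.4 million digits.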
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"{solution(10) = }")
| 118 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_a: Any = {"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a: Any = ["""ViTFeatureExtractor"""]
_a: int = ["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a: List[Any] = [
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a: Any = [
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a: int = [
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
_a: Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 705 |
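# The `_LazyModule` indirection above keeps `import transformers.models.vit` cheap: the heavy
# torch/TensorFlow/Flax sub-modules are only imported when one of the listed names is first accessed.
# For example (illustrative only):
#
#   from transformers.models.vit import ViTConfig   # pulls in configuration_vit only
#   from transformers.models.vit import ViTModel    # now modeling_vit (and torch) gets imported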
from __future__ import annotations
from collections import Counter
from random import random
class __UpperCamelCase :
def __init__( self : Any ):
'''simple docstring'''
UpperCAmelCase_ = {}
def __A ( self : List[str] , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase_ = {}
def __A ( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : float ):
'''simple docstring'''
if nodea not in self.connections:
self.add_node(lowerCAmelCase )
if nodea not in self.connections:
self.add_node(lowerCAmelCase )
UpperCAmelCase_ = probability
def __A ( self : Tuple ):
'''simple docstring'''
return list(self.connections )
def __A ( self : str , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase_ = 0
UpperCAmelCase_ = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def __lowerCAmelCase ( A , A , A ):
UpperCAmelCase_ = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(A , A , A )
UpperCAmelCase_ = Counter(graph.get_nodes() )
UpperCAmelCase_ = start
for _ in range(A ):
UpperCAmelCase_ = graph.transition(A )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod() | 268 | 0 |
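# A usage sketch for the transition-counting helper above, assuming its three parameters are the
# start node, the list of (from, to, probability) transition triples, and the number of random
# steps to simulate (the parameter order is an assumption):
#
#   transitions = [
#       ("a", "a", 0.9), ("a", "b", 0.075), ("a", "c", 0.025),
#       ("b", "a", 0.15), ("b", "b", 0.8), ("b", "c", 0.05),
#       ("c", "a", 0.25), ("c", "b", 0.25), ("c", "c", 0.5),
#   ]
#   visit_counts = __lowerCAmelCase("a", transitions, 5_000)  # Counter of visits per node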
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum( arr : Sequence[float] , allow_empty_subarrays : bool = False ) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('''-inf''' )
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
        max_sum = max(max_sum , curr_sum )
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"""{max_subarray_sum(nums) = }""")
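    # Two further checks of the `allow_empty_subarrays` flag: it only changes the result when every
    # element is negative, where the empty subarray (sum 0) beats any non-empty one.
    print(F"""{max_subarray_sum([-3, -1, -2]) = }""")                              # -> -1
    print(F"""{max_subarray_sum([-3, -1, -2], allow_empty_subarrays=True) = }""")  # -> 0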
| 262 | '''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def lowerCAmelCase ( UpperCamelCase__ : Dict ):
"""simple docstring"""
__UpperCAmelCase = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
f"""{test_file} instead.""" )
__UpperCAmelCase = components[-1]
if not test_fn.endswith('''py''' ):
raise ValueError(f"""`test_file` should be a python file. Got {test_fn} instead.""" )
if not test_fn.startswith('''test_modeling_''' ):
raise ValueError(
f"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
__UpperCAmelCase = components[:-1] + [test_fn.replace('''.py''' , '''''' )]
__UpperCAmelCase = '''.'''.join(UpperCamelCase__ )
return test_module_path
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = get_module_path(UpperCamelCase__ )
__UpperCAmelCase = importlib.import_module(UpperCamelCase__ )
return test_module
def lowerCAmelCase ( UpperCamelCase__ : Tuple ):
"""simple docstring"""
__UpperCAmelCase = []
__UpperCAmelCase = get_test_module(UpperCamelCase__ )
for attr in dir(UpperCamelCase__ ):
if attr.endswith('''ModelTester''' ):
tester_classes.append(getattr(UpperCamelCase__ , UpperCamelCase__ ) )
# sort with class names
return sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x.__name__ )
def lowerCAmelCase ( UpperCamelCase__ : List[Any] ):
"""simple docstring"""
__UpperCAmelCase = []
__UpperCAmelCase = get_test_module(UpperCamelCase__ )
for attr in dir(UpperCamelCase__ ):
__UpperCAmelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
__UpperCAmelCase = getattr(UpperCamelCase__ , '''all_model_classes''' , [] )
if len(UpperCamelCase__ ) > 0:
test_classes.append(UpperCamelCase__ )
# sort with class names
return sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x.__name__ )
def lowerCAmelCase ( UpperCamelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase = get_test_classes(UpperCamelCase__ )
__UpperCAmelCase = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x.__name__ )
def lowerCAmelCase ( UpperCamelCase__ : Any ):
"""simple docstring"""
__UpperCAmelCase = test_class()
if hasattr(UpperCamelCase__ , '''setUp''' ):
test.setUp()
__UpperCAmelCase = None
if hasattr(UpperCamelCase__ , '''model_tester''' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
__UpperCAmelCase = test.model_tester.__class__
return model_tester
def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = get_test_classes(UpperCamelCase__ )
__UpperCAmelCase = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(UpperCamelCase__ )
# sort with class names
return sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x.__name__ )
def lowerCAmelCase ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Any ):
"""simple docstring"""
__UpperCAmelCase = get_test_classes_for_model(UpperCamelCase__ , UpperCamelCase__ )
__UpperCAmelCase = []
for test_class in test_classes:
__UpperCAmelCase = get_model_tester_from_test_class(UpperCamelCase__ )
if tester_class is not None:
tester_classes.append(UpperCamelCase__ )
# sort with class names
return sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x.__name__ )
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = get_test_classes(UpperCamelCase__ )
__UpperCAmelCase = {test_class: get_model_tester_from_test_class(UpperCamelCase__ ) for test_class in test_classes}
return test_tester_mapping
def lowerCAmelCase ( UpperCamelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase = get_model_classes(UpperCamelCase__ )
__UpperCAmelCase = {
model_class: get_test_classes_for_model(UpperCamelCase__ , UpperCamelCase__ ) for model_class in model_classes
}
return model_test_mapping
def lowerCAmelCase ( UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase = get_model_classes(UpperCamelCase__ )
__UpperCAmelCase = {
model_class: get_tester_classes_for_model(UpperCamelCase__ , UpperCamelCase__ ) for model_class in model_classes
}
return model_to_tester_mapping
def lowerCAmelCase ( UpperCamelCase__ : Tuple ):
"""simple docstring"""
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return o
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return o.__name__
elif isinstance(UpperCamelCase__ , (list, tuple) ):
return [to_json(UpperCamelCase__ ) for x in o]
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return {to_json(UpperCamelCase__ ): to_json(UpperCamelCase__ ) for k, v in o.items()}
else:
return o
| 262 | 1 |
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy heuristic that returns a (not necessarily minimum) vertex cover of the graph."""
    queue: list[list] = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
return chosen_vertices
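# The heuristic above repeatedly commits the vertex with the most uncovered edges (the
# -1 * len(v) priorities turn heapq's min-heap into a max-heap by degree) and strips its
# edges from every other adjacency list. It is fast but offers no constant-factor
# approximation guarantee; for the demo graph below it returns {0, 1, 2, 4}.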
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 8 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert'] = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert_fast'] = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_rembert'] = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_rembert'] = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
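    # Replacing the entry in sys.modules with a _LazyModule proxy means the heavy torch / TF
    # submodules listed in _import_structure are only imported the first time one of their
    # attributes is actually accessed, keeping the package import itself cheap.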
| 8 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "sew"
def __init__( self : List[str] , _A : Union[str, Any]=32 , _A : str=768 , _A : List[str]=12 , _A : List[str]=12 , _A : Optional[int]=3072 , _A : Optional[int]=2 , _A : str="gelu" , _A : Dict=0.1 , _A : List[Any]=0.1 , _A : Any=0.1 , _A : Dict=0.0 , _A : str=0.1 , _A : Optional[int]=0.1 , _A : Dict=0.02 , _A : Optional[int]=1e-5 , _A : Dict="group" , _A : Optional[Any]="gelu" , _A : str=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , _A : Union[str, Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _A : List[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _A : Optional[int]=False , _A : str=128 , _A : Any=16 , _A : List[Any]=True , _A : Optional[int]=0.05 , _A : Tuple=10 , _A : List[str]=2 , _A : List[str]=0.0 , _A : Optional[int]=10 , _A : int=0 , _A : Optional[int]="mean" , _A : Dict=False , _A : List[Any]=False , _A : List[Any]=256 , _A : List[Any]=0 , _A : List[str]=1 , _A : List[str]=2 , **_A : int , ):
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A )
_UpperCamelCase = hidden_size
_UpperCamelCase = feat_extract_norm
_UpperCamelCase = feat_extract_activation
_UpperCamelCase = list(_A )
_UpperCamelCase = list(_A )
_UpperCamelCase = list(_A )
_UpperCamelCase = conv_bias
_UpperCamelCase = num_conv_pos_embeddings
_UpperCamelCase = num_conv_pos_embedding_groups
_UpperCamelCase = len(self.conv_dim )
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = intermediate_size
_UpperCamelCase = squeeze_factor
_UpperCamelCase = hidden_act
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = feat_proj_dropout
_UpperCamelCase = final_dropout
_UpperCamelCase = layerdrop
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = initializer_range
_UpperCamelCase = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCamelCase = apply_spec_augment
_UpperCamelCase = mask_time_prob
_UpperCamelCase = mask_time_length
_UpperCamelCase = mask_time_min_masks
_UpperCamelCase = mask_feature_prob
_UpperCamelCase = mask_feature_length
_UpperCamelCase = mask_feature_min_masks
# ctc loss
_UpperCamelCase = ctc_loss_reduction
_UpperCamelCase = ctc_zero_infinity
# sequence classification
_UpperCamelCase = use_weighted_layer_sum
_UpperCamelCase = classifier_proj_size
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
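        # The product of the conv strides is the feature extractor's hop size: with the
        # default strides (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) one encoder frame
        # corresponds to 5 * 2**6 = 320 raw waveform samples.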
| 10 | """simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 637_8137.0
AXIS_B = 635_6752.31_4245
EQUATORIAL_RADIUS = 6_3_7_8_1_3_7
def SCREAMING_SNAKE_CASE__ (lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # flattening of the ellipsoid: f = (a - b) / a
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
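# Lambert's formula corrects the spherical central angle sigma (obtained from the
# haversine distance) for the WGS-84 flattening f = (a - b) / a; the commonly quoted
# accuracy is on the order of 10 metres over distances of thousands of kilometres.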
if __name__ == "__main__":
import doctest
doctest.testmod()
| 586 | 0 |
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(R"""\b(a|an|the)\b""", re.UNICODE)
OPTS = None
def parse_args():
SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' )
parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' )
parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' )
parser.add_argument(
'''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' )
parser.add_argument(
'''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' )
parser.add_argument(
'''--na-prob-thresh''' , '''-t''' , type=_SCREAMING_SNAKE_CASE , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , )
parser.add_argument(
'''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=_SCREAMING_SNAKE_CASE , help='''Save precision-recall curves to directory.''' )
parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def __lowercase (_SCREAMING_SNAKE_CASE :Optional[int] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
SCREAMING_SNAKE_CASE : List[str] = bool(qa['''answers''']['''text'''] )
return qid_to_has_ans
def __lowercase (_SCREAMING_SNAKE_CASE :str ):
def remove_articles(_SCREAMING_SNAKE_CASE :Optional[Any] ):
return ARTICLES_REGEX.sub(''' ''' , _SCREAMING_SNAKE_CASE )
def white_space_fix(_SCREAMING_SNAKE_CASE :str ):
return " ".join(text.split() )
def remove_punc(_SCREAMING_SNAKE_CASE :Tuple ):
SCREAMING_SNAKE_CASE : Dict = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_SCREAMING_SNAKE_CASE :Union[str, Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_SCREAMING_SNAKE_CASE ) ) ) )
def __lowercase (_SCREAMING_SNAKE_CASE :List[Any] ):
if not s:
return []
return normalize_answer(_SCREAMING_SNAKE_CASE ).split()
def __lowercase (_SCREAMING_SNAKE_CASE :Optional[Any] , _SCREAMING_SNAKE_CASE :Union[str, Any] ):
return int(normalize_answer(_SCREAMING_SNAKE_CASE ) == normalize_answer(_SCREAMING_SNAKE_CASE ) )
def __lowercase (_SCREAMING_SNAKE_CASE :Union[str, Any] , _SCREAMING_SNAKE_CASE :Optional[Any] ):
SCREAMING_SNAKE_CASE : Optional[int] = get_tokens(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[Any] = get_tokens(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[Any] = collections.Counter(_SCREAMING_SNAKE_CASE ) & collections.Counter(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Optional[int] = sum(common.values() )
if len(_SCREAMING_SNAKE_CASE ) == 0 or len(_SCREAMING_SNAKE_CASE ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
SCREAMING_SNAKE_CASE : Tuple = 1.0 * num_same / len(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Dict = 1.0 * num_same / len(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Union[str, Any] = (2 * precision * recall) / (precision + recall)
return fa
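# This is the standard SQuAD token-level F1: precision = overlap / |prediction tokens|,
# recall = overlap / |gold tokens|, and the Counter intersection counts each shared token
# at most min(pred count, gold count) times, so repeated words are not over-credited.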
def __lowercase (_SCREAMING_SNAKE_CASE :Optional[int] , _SCREAMING_SNAKE_CASE :str ):
SCREAMING_SNAKE_CASE : List[Any] = {}
SCREAMING_SNAKE_CASE : Optional[int] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
SCREAMING_SNAKE_CASE : str = qa['''id''']
SCREAMING_SNAKE_CASE : Tuple = [t for t in qa['''answers''']['''text'''] if normalize_answer(_SCREAMING_SNAKE_CASE )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
SCREAMING_SNAKE_CASE : Dict = ['''''']
if qid not in preds:
print(F'''Missing prediction for {qid}''' )
continue
SCREAMING_SNAKE_CASE : Tuple = preds[qid]
# Take max over all gold answers
SCREAMING_SNAKE_CASE : int = max(compute_exact(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for a in gold_answers )
SCREAMING_SNAKE_CASE : Optional[Any] = max(compute_fa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for a in gold_answers )
return exact_scores, fa_scores
def __lowercase (_SCREAMING_SNAKE_CASE :Dict , _SCREAMING_SNAKE_CASE :Dict , _SCREAMING_SNAKE_CASE :Any , _SCREAMING_SNAKE_CASE :Optional[int] ):
SCREAMING_SNAKE_CASE : Tuple = {}
for qid, s in scores.items():
SCREAMING_SNAKE_CASE : int = na_probs[qid] > na_prob_thresh
if pred_na:
SCREAMING_SNAKE_CASE : Dict = float(not qid_to_has_ans[qid] )
else:
SCREAMING_SNAKE_CASE : Optional[int] = s
return new_scores
def __lowercase (_SCREAMING_SNAKE_CASE :List[Any] , _SCREAMING_SNAKE_CASE :str , _SCREAMING_SNAKE_CASE :int=None ):
if not qid_list:
SCREAMING_SNAKE_CASE : Optional[int] = len(_SCREAMING_SNAKE_CASE )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores.values() ) / total),
('''f1''', 100.0 * sum(fa_scores.values() ) / total),
('''total''', total),
] )
else:
SCREAMING_SNAKE_CASE : Any = len(_SCREAMING_SNAKE_CASE )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('''f1''', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('''total''', total),
] )
def __lowercase (_SCREAMING_SNAKE_CASE :Dict , _SCREAMING_SNAKE_CASE :Dict , _SCREAMING_SNAKE_CASE :Optional[int] ):
for k in new_eval:
SCREAMING_SNAKE_CASE : Tuple = new_eval[k]
def __lowercase (_SCREAMING_SNAKE_CASE :Any , _SCREAMING_SNAKE_CASE :List[str] , _SCREAMING_SNAKE_CASE :List[str] , _SCREAMING_SNAKE_CASE :Dict ):
plt.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , color='''b''' , alpha=0.2 , where='''post''' )
plt.fill_between(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , step='''post''' , alpha=0.2 , color='''b''' )
plt.xlabel('''Recall''' )
plt.ylabel('''Precision''' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(_SCREAMING_SNAKE_CASE )
plt.savefig(_SCREAMING_SNAKE_CASE )
plt.clf()
def __lowercase (_SCREAMING_SNAKE_CASE :List[Any] , _SCREAMING_SNAKE_CASE :List[str] , _SCREAMING_SNAKE_CASE :Union[str, Any] , _SCREAMING_SNAKE_CASE :List[Any] , _SCREAMING_SNAKE_CASE :Union[str, Any]=None , _SCREAMING_SNAKE_CASE :int=None ):
SCREAMING_SNAKE_CASE : List[str] = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : na_probs[k] )
SCREAMING_SNAKE_CASE : int = 0.0
SCREAMING_SNAKE_CASE : Optional[Any] = 1.0
SCREAMING_SNAKE_CASE : Optional[Any] = 0.0
SCREAMING_SNAKE_CASE : Optional[int] = [1.0]
SCREAMING_SNAKE_CASE : Optional[int] = [0.0]
SCREAMING_SNAKE_CASE : Optional[Any] = 0.0
for i, qid in enumerate(_SCREAMING_SNAKE_CASE ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
SCREAMING_SNAKE_CASE : Dict = true_pos / float(i + 1 )
SCREAMING_SNAKE_CASE : List[str] = true_pos / float(_SCREAMING_SNAKE_CASE )
if i == len(_SCREAMING_SNAKE_CASE ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(_SCREAMING_SNAKE_CASE )
recalls.append(_SCREAMING_SNAKE_CASE )
if out_image:
plot_pr_curve(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return {"ap": 100.0 * avg_prec}
def __lowercase (_SCREAMING_SNAKE_CASE :str , _SCREAMING_SNAKE_CASE :str , _SCREAMING_SNAKE_CASE :List[str] , _SCREAMING_SNAKE_CASE :str , _SCREAMING_SNAKE_CASE :int , _SCREAMING_SNAKE_CASE :str ):
if out_image_dir and not os.path.exists(_SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Tuple = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
SCREAMING_SNAKE_CASE : Tuple = make_precision_recall_eval(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , out_image=os.path.join(_SCREAMING_SNAKE_CASE , '''pr_exact.png''' ) , title='''Precision-Recall curve for Exact Match score''' , )
SCREAMING_SNAKE_CASE : Tuple = make_precision_recall_eval(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , out_image=os.path.join(_SCREAMING_SNAKE_CASE , '''pr_f1.png''' ) , title='''Precision-Recall curve for F1 score''' , )
SCREAMING_SNAKE_CASE : str = {k: float(_SCREAMING_SNAKE_CASE ) for k, v in qid_to_has_ans.items()}
SCREAMING_SNAKE_CASE : Dict = make_precision_recall_eval(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , out_image=os.path.join(_SCREAMING_SNAKE_CASE , '''pr_oracle.png''' ) , title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' , )
merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''pr_exact''' )
merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''pr_f1''' )
merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''pr_oracle''' )
def __lowercase (_SCREAMING_SNAKE_CASE :Tuple , _SCREAMING_SNAKE_CASE :Tuple , _SCREAMING_SNAKE_CASE :Tuple , _SCREAMING_SNAKE_CASE :Dict ):
if not qid_list:
return
SCREAMING_SNAKE_CASE : Dict = [na_probs[k] for k in qid_list]
SCREAMING_SNAKE_CASE : List[Any] = np.ones_like(_SCREAMING_SNAKE_CASE ) / float(len(_SCREAMING_SNAKE_CASE ) )
plt.hist(_SCREAMING_SNAKE_CASE , weights=_SCREAMING_SNAKE_CASE , bins=20 , range=(0.0, 1.0) )
plt.xlabel('''Model probability of no-answer''' )
plt.ylabel('''Proportion of dataset''' )
plt.title(F'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(_SCREAMING_SNAKE_CASE , F'''na_prob_hist_{name}.png''' ) )
plt.clf()
def __lowercase (_SCREAMING_SNAKE_CASE :List[str] , _SCREAMING_SNAKE_CASE :Union[str, Any] , _SCREAMING_SNAKE_CASE :Any , _SCREAMING_SNAKE_CASE :Union[str, Any] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
SCREAMING_SNAKE_CASE : Optional[Any] = num_no_ans
SCREAMING_SNAKE_CASE : str = cur_score
SCREAMING_SNAKE_CASE : Optional[int] = 0.0
SCREAMING_SNAKE_CASE : Optional[int] = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : na_probs[k] )
for i, qid in enumerate(_SCREAMING_SNAKE_CASE ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
SCREAMING_SNAKE_CASE : int = scores[qid]
else:
if preds[qid]:
SCREAMING_SNAKE_CASE : List[Any] = -1
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
cur_score += diff
if cur_score > best_score:
SCREAMING_SNAKE_CASE : List[Any] = cur_score
SCREAMING_SNAKE_CASE : Dict = na_probs[qid]
return 100.0 * best_score / len(_SCREAMING_SNAKE_CASE ), best_thresh
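# The sweep above visits questions in increasing order of predicted no-answer probability;
# at each step cur_score is the metric obtained if every question seen so far is answered
# and all remaining ones are predicted unanswerable, so taking the maximum over the sweep
# picks the best possible no-answer threshold in hindsight.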
def __lowercase (_SCREAMING_SNAKE_CASE :Optional[Any] , _SCREAMING_SNAKE_CASE :List[Any] , _SCREAMING_SNAKE_CASE :Optional[int] , _SCREAMING_SNAKE_CASE :Tuple , _SCREAMING_SNAKE_CASE :Tuple , _SCREAMING_SNAKE_CASE :List[Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = find_best_thresh(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = find_best_thresh(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Union[str, Any] = best_exact
SCREAMING_SNAKE_CASE : Union[str, Any] = exact_thresh
SCREAMING_SNAKE_CASE : str = best_fa
SCREAMING_SNAKE_CASE : Any = fa_thresh
def main():
with open(OPTS.data_file ) as f:
SCREAMING_SNAKE_CASE : List[str] = json.load(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : str = dataset_json['''data''']
with open(OPTS.pred_file ) as f:
SCREAMING_SNAKE_CASE : str = json.load(_SCREAMING_SNAKE_CASE )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
SCREAMING_SNAKE_CASE : Optional[Any] = json.load(_SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE : int = {k: 0.0 for k in preds}
SCREAMING_SNAKE_CASE : Tuple = make_qid_to_has_ans(_SCREAMING_SNAKE_CASE ) # maps qid to True/False
SCREAMING_SNAKE_CASE : List[str] = [k for k, v in qid_to_has_ans.items() if v]
SCREAMING_SNAKE_CASE : Tuple = [k for k, v in qid_to_has_ans.items() if not v]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = get_raw_scores(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Any = apply_no_ans_threshold(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.na_prob_thresh )
SCREAMING_SNAKE_CASE : Optional[Any] = apply_no_ans_threshold(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.na_prob_thresh )
SCREAMING_SNAKE_CASE : Optional[Any] = make_eval_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if has_ans_qids:
SCREAMING_SNAKE_CASE : List[str] = make_eval_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , qid_list=_SCREAMING_SNAKE_CASE )
merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''HasAns''' )
if no_ans_qids:
SCREAMING_SNAKE_CASE : Optional[Any] = make_eval_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , qid_list=_SCREAMING_SNAKE_CASE )
merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''NoAns''' )
if OPTS.na_prob_file:
find_all_best_thresh(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.out_image_dir )
histogram_na_prob(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.out_image_dir , '''hasAns''' )
histogram_na_prob(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.out_image_dir , '''noAns''' )
if OPTS.out_file:
with open(OPTS.out_file , '''w''' ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
print(json.dumps(_SCREAMING_SNAKE_CASE , indent=2 ) )
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
| 355 |
'''simple docstring'''
from __future__ import annotations
snake_case_ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def __lowercase (_SCREAMING_SNAKE_CASE :list[list[int]] , _SCREAMING_SNAKE_CASE :list[int] , _SCREAMING_SNAKE_CASE :list[int] , _SCREAMING_SNAKE_CASE :int , _SCREAMING_SNAKE_CASE :list[list[int]] , ):
SCREAMING_SNAKE_CASE : Optional[Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_SCREAMING_SNAKE_CASE ) )
] # the reference grid
SCREAMING_SNAKE_CASE : Dict = 1
SCREAMING_SNAKE_CASE : Optional[Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_SCREAMING_SNAKE_CASE ) )
] # the action grid
SCREAMING_SNAKE_CASE : Union[str, Any] = init[0]
SCREAMING_SNAKE_CASE : Optional[Any] = init[1]
SCREAMING_SNAKE_CASE : Any = 0
SCREAMING_SNAKE_CASE : List[Any] = g + heuristic[x][y] # cost from starting cell to destination cell
SCREAMING_SNAKE_CASE : Union[str, Any] = [[f, g, x, y]]
SCREAMING_SNAKE_CASE : List[str] = False # flag that is set when search is complete
SCREAMING_SNAKE_CASE : Any = False # flag set if we can't find expand
while not found and not resign:
if len(_SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('''Algorithm is unable to find solution''' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
SCREAMING_SNAKE_CASE : List[str] = cell.pop()
SCREAMING_SNAKE_CASE : List[str] = next_cell[2]
SCREAMING_SNAKE_CASE : Dict = next_cell[3]
SCREAMING_SNAKE_CASE : str = next_cell[1]
if x == goal[0] and y == goal[1]:
SCREAMING_SNAKE_CASE : Dict = True
else:
for i in range(len(_SCREAMING_SNAKE_CASE ) ): # to try out different valid actions
SCREAMING_SNAKE_CASE : Optional[Any] = x + DIRECTIONS[i][0]
SCREAMING_SNAKE_CASE : Tuple = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_SCREAMING_SNAKE_CASE ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
SCREAMING_SNAKE_CASE : Optional[Any] = g + cost
SCREAMING_SNAKE_CASE : Any = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
SCREAMING_SNAKE_CASE : List[Any] = 1
SCREAMING_SNAKE_CASE : Optional[int] = i
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : List[Any] = goal[0]
SCREAMING_SNAKE_CASE : Optional[int] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
SCREAMING_SNAKE_CASE : str = x - DIRECTIONS[action[x][y]][0]
SCREAMING_SNAKE_CASE : str = y - DIRECTIONS[action[x][y]][1]
SCREAMING_SNAKE_CASE : Tuple = xa
SCREAMING_SNAKE_CASE : Dict = ya
invpath.append([x, y] )
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
path.append(invpath[len(_SCREAMING_SNAKE_CASE ) - 1 - i] )
return path, action
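# The routine above is essentially A* on a 4-connected grid: the open list stores
# [f, g, x, y] with f = g + heuristic, it is re-sorted on every iteration instead of
# using a heap, and the `action` grid remembers which move reached each cell so the path
# can be reconstructed backwards from the goal.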
if __name__ == "__main__":
snake_case_ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
snake_case_ = [0, 0]
# all coordinates are given in format [y,x]
snake_case_ = [len(grid) - 1, len(grid[0]) - 1]
snake_case_ = 1
# the cost map which pushes the path closer to the goal
snake_case_ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
snake_case_ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
snake_case_ = 99
snake_case_ , snake_case_ = search(grid, init, goal, cost, heuristic)
print("""ACTION MAP""")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 355 | 1 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")
def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))
fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
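# Note: this scrapes the three "maincounter-number" <span> elements from the worldometers
# page, so it silently depends on that exact page layout and will break (or return the
# wrong fields) if the markup changes.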
| 295 |
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class lowerCAmelCase :
def __init__( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Any=3 , UpperCAmelCase : List[str]=7 , UpperCAmelCase : Any=True , UpperCAmelCase : str=True , UpperCAmelCase : Dict=False , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : int=99 , UpperCAmelCase : Tuple=32 , UpperCAmelCase : int=5 , UpperCAmelCase : str=4 , UpperCAmelCase : Optional[Any]=37 , UpperCAmelCase : Dict="gelu" , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Any=0.1 , UpperCAmelCase : List[str]=512 , UpperCAmelCase : Dict=16 , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : Optional[Any]=0.0_2 , UpperCAmelCase : Dict=3 , UpperCAmelCase : int=4 , UpperCAmelCase : List[str]=None , ) -> Dict:
lowerCamelCase__ : Optional[Any] = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : int = seq_length
lowerCamelCase__ : Optional[Any] = is_training
lowerCamelCase__ : Any = use_input_mask
lowerCamelCase__ : Dict = use_token_type_ids
lowerCamelCase__ : Dict = use_labels
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : List[Any] = hidden_size
lowerCamelCase__ : List[Any] = num_hidden_layers
lowerCamelCase__ : Optional[int] = num_attention_heads
lowerCamelCase__ : Tuple = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Optional[int] = hidden_dropout_prob
lowerCamelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : Any = type_sequence_label_size
lowerCamelCase__ : Tuple = initializer_range
lowerCamelCase__ : Union[str, Any] = num_labels
lowerCamelCase__ : str = num_choices
lowerCamelCase__ : int = scope
def A_ ( self : Dict ) -> Any:
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : str = None
if self.use_input_mask:
lowerCamelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
lowerCamelCase__ : Tuple = None
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : Optional[int] = None
if self.use_labels:
lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self : Any ) -> Any:
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=UpperCAmelCase , )
def A_ ( self : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] ) -> Tuple:
lowerCamelCase__ : Tuple = FalconModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : List[str] = model(UpperCAmelCase , attention_mask=UpperCAmelCase )
lowerCamelCase__ : List[Any] = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] , ) -> Optional[Any]:
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Optional[Any] = FalconModel(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : str = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , )
lowerCamelCase__ : Union[str, Any] = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , )
lowerCamelCase__ : Dict = model(UpperCAmelCase , attention_mask=UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : List[str] , UpperCAmelCase : str , ) -> List[str]:
lowerCamelCase__ : Optional[Any] = FalconForCausalLM(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : int = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , ) -> List[str]:
lowerCamelCase__ : int = True
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : str = FalconForCausalLM(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
# first forward pass
lowerCamelCase__ : List[str] = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , use_cache=UpperCAmelCase , )
lowerCamelCase__ : Dict = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase__ : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase__ : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCamelCase__ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase__ : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCamelCase__ : Union[str, Any] = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , output_hidden_states=UpperCAmelCase , )['hidden_states'][0]
lowerCamelCase__ : Tuple = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , output_hidden_states=UpperCAmelCase , )['hidden_states'][0]
# select random slice
lowerCamelCase__ : Dict = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase__ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
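        # i.e. generating three extra tokens on top of the cached past_key_values must
        # reproduce (within 1e-3) the hidden states obtained by re-running the full
        # concatenated sequence without a cache.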
def A_ ( self : Optional[Any] ) -> Optional[int]:
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : Union[str, Any] = config_and_inputs
lowerCamelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ = (FalconForCausalLM,) if is_torch_available() else ()
UpperCAmelCase__ = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def A_ ( self : List[str] ) -> List[Any]:
lowerCamelCase__ : Union[str, Any] = FalconModelTester(self )
lowerCamelCase__ : List[Any] = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A_ ( self : Optional[int] ) -> int:
self.config_tester.run_common_tests()
def A_ ( self : Dict ) -> Dict:
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A_ ( self : List[str] ) -> int:
lowerCamelCase__ , *lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
lowerCamelCase__ : List[Any] = alibi
self.model_tester.create_and_check_model(UpperCAmelCase , *UpperCAmelCase )
def A_ ( self : Optional[int] ) -> Optional[int]:
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : int = 3
lowerCamelCase__ : List[str] = input_dict['input_ids']
lowerCamelCase__ : Dict = input_ids.ne(1 ).to(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCamelCase__ : int = FalconForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : List[str] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A_ ( self : Optional[Any] ) -> Optional[Any]:
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Tuple = 3
lowerCamelCase__ : List[str] = 'single_label_classification'
lowerCamelCase__ : Optional[int] = input_dict['input_ids']
lowerCamelCase__ : Union[str, Any] = input_ids.ne(1 ).to(UpperCAmelCase )
lowerCamelCase__ : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCamelCase__ : Union[str, Any] = FalconForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : Any = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A_ ( self : Any ) -> Union[str, Any]:
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : List[str] = input_dict['input_ids']
lowerCamelCase__ : Optional[Any] = FalconForCausalLM(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : Any = model(UpperCAmelCase , use_cache=UpperCAmelCase )
lowerCamelCase__ : int = input_ids.shape[0]
lowerCamelCase__ : str = model._convert_to_rw_cache(result.past_key_values )
lowerCamelCase__ : Optional[Any] = model._convert_cache_to_standard_format(UpperCAmelCase , UpperCAmelCase )
for layer in range(len(UpperCAmelCase ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def A_ ( self : List[str] ) -> str:
lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[Any] = 3
lowerCamelCase__ : List[str] = 'multi_label_classification'
lowerCamelCase__ : Optional[int] = input_dict['input_ids']
lowerCamelCase__ : Tuple = input_ids.ne(1 ).to(UpperCAmelCase )
lowerCamelCase__ : int = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCamelCase__ : Any = FalconForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : Any = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A_ ( self : Any ) -> Tuple:
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(UpperCAmelCase , 'use_cache' ):
return
lowerCamelCase__ : Dict = model_class(UpperCAmelCase ).to(UpperCAmelCase )
if "use_cache" not in inputs:
lowerCamelCase__ : Optional[Any] = True
lowerCamelCase__ : Any = model(**UpperCAmelCase )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
lowerCamelCase__ : Any = (
getattr(UpperCAmelCase , 'decoder_layers' , UpperCAmelCase )
or getattr(UpperCAmelCase , 'num_decoder_layers' , UpperCAmelCase )
or config.num_hidden_layers
)
lowerCamelCase__ : Dict = getattr(UpperCAmelCase , 'num_kv_heads' , config.num_attention_heads )
lowerCamelCase__ : Dict = getattr(UpperCAmelCase , 'd_model' , config.hidden_size )
lowerCamelCase__ : str = embed_dim // num_attention_heads
lowerCamelCase__ : List[Any] = outputs['past_key_values']
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
lowerCamelCase__ , lowerCamelCase__ : Any = inputs['input_ids'].shape
for i in range(UpperCAmelCase ):
if config.new_decoder_architecture:
lowerCamelCase__ : int = config.num_attention_heads
elif config.multi_query:
lowerCamelCase__ : Optional[Any] = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
@slow
def A_ ( self : int ) -> Union[str, Any]:
lowerCamelCase__ : str = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b' )
lowerCamelCase__ : List[Any] = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b' )
model.eval()
model.to(UpperCAmelCase )
lowerCamelCase__ : Any = tokenizer('My favorite food is' , return_tensors='pt' ).to(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = (
'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'
)
lowerCamelCase__ : Optional[Any] = model.generate(**UpperCAmelCase , do_sample=UpperCAmelCase , max_new_tokens=19 )
lowerCamelCase__ : Union[str, Any] = tokenizer.batch_decode(UpperCAmelCase )[0]
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def A_ ( self : Any ) -> int:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
lowerCamelCase__ : str = AutoTokenizer.from_pretrained(UpperCAmelCase )
lowerCamelCase__ : int = FalconForCausalLM.from_pretrained(UpperCAmelCase )
model.eval()
model.to(UpperCAmelCase )
lowerCamelCase__ : Tuple = tokenizer('My favorite food is' , return_tensors='pt' ).to(UpperCAmelCase )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**UpperCAmelCase , do_sample=UpperCAmelCase , max_new_tokens=4 )
model.generate(**UpperCAmelCase , do_sample=UpperCAmelCase , max_new_tokens=4 )
model.generate(**UpperCAmelCase , num_beams=2 , max_new_tokens=4 )
@slow
def A_ ( self : Union[str, Any] ) -> str:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
lowerCamelCase__ : Dict = AutoTokenizer.from_pretrained(UpperCAmelCase )
lowerCamelCase__ : Tuple = FalconForCausalLM.from_pretrained(UpperCAmelCase )
model.eval()
model.to(device=UpperCAmelCase )
lowerCamelCase__ : Any = tokenizer('My favorite food is' , return_tensors='pt' ).to(UpperCAmelCase )
# Test results are the same with and without cache
lowerCamelCase__ : List[str] = model.generate(**UpperCAmelCase , do_sample=UpperCAmelCase , max_new_tokens=20 , use_cache=UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = model.generate(**UpperCAmelCase , do_sample=UpperCAmelCase , max_new_tokens=20 , use_cache=UpperCAmelCase )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 295 | 1 |
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift the binary representation of ``number`` left by ``shift_amount`` bits."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers" )
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift the binary representation of ``number`` right, filling with zeros."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers" )
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift right in two's complement, replicating the sign bit."""
    if number >= 0: # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else: # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:]) # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
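# Worked examples, traced by hand against the functions above:
#   logical_left_shift(1, 1)       -> '0b10'
#   logical_right_shift(0b1011, 3) -> '0b1'
#   arithmetic_right_shift(-8, 2)  -> '0b11110'   (5-bit two's complement of -2)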
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class a ( __UpperCAmelCase ):
lowercase_ : BigBirdConfig
lowercase_ : jnp.dtype = jnp.floataa
lowercase_ : bool = True
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
super().setup()
__lowerCAmelCase = nn.Dense(5 , dtype=self.dtype )
def __call__( self : Optional[Any] , *snake_case__ : List[str] , **snake_case__ : str ):
"""simple docstring"""
__lowerCAmelCase = super().__call__(*snake_case__ , **snake_case__ )
__lowerCAmelCase = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class a ( __UpperCAmelCase ):
lowercase_ : List[str] = FlaxBigBirdForNaturalQuestionsModule
def _UpperCAmelCase ( UpperCamelCase: Optional[Any] , UpperCamelCase: List[str] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: int ):
"""simple docstring"""
def cross_entropy(UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: Optional[int]=None ):
__lowerCAmelCase = logits.shape[-1]
__lowerCAmelCase = (labels[..., None] == jnp.arange(UpperCamelCase )[None]).astype("f4" )
__lowerCAmelCase = jax.nn.log_softmax(UpperCamelCase , axis=-1 )
__lowerCAmelCase = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
__lowerCAmelCase = reduction(UpperCamelCase )
return loss
__lowerCAmelCase = partial(UpperCamelCase , reduction=jnp.mean )
__lowerCAmelCase = cross_entropy(UpperCamelCase , UpperCamelCase )
__lowerCAmelCase = cross_entropy(UpperCamelCase , UpperCamelCase )
__lowerCAmelCase = cross_entropy(UpperCamelCase , UpperCamelCase )
return (start_loss + end_loss + pooled_loss) / 3
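# The Natural Questions objective above averages three cross-entropies with equal weight:
# start-position, end-position, and the 5-way answer-category ("pooled") head added by the
# extra Dense(5) layer in the module definition.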
@dataclass
class a :
lowercase_ : str = "google/bigbird-roberta-base"
lowercase_ : int = 3_000
lowercase_ : int = 10_500
lowercase_ : int = 128
lowercase_ : int = 3
lowercase_ : int = 1
lowercase_ : int = 5
# tx_args
lowercase_ : float = 3e-5
lowercase_ : float = 0.0
lowercase_ : int = 20_000
lowercase_ : float = 0.0095
lowercase_ : str = "bigbird-roberta-natural-questions"
lowercase_ : str = "training-expt"
lowercase_ : str = "data/nq-training.jsonl"
lowercase_ : str = "data/nq-validation.jsonl"
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
os.makedirs(self.base_dir , exist_ok=snake_case__ )
__lowerCAmelCase = os.path.join(self.base_dir , self.save_dir )
__lowerCAmelCase = self.batch_size_per_device * jax.device_count()
@dataclass
class a :
lowercase_ : int
lowercase_ : int = 4_096 # no dynamic padding on TPUs
def __call__( self : List[Any] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
__lowerCAmelCase = self.collate_fn(snake_case__ )
__lowerCAmelCase = jax.tree_util.tree_map(snake_case__ , snake_case__ )
return batch
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Dict ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.fetch_inputs(features["input_ids"] )
__lowerCAmelCase = {
"input_ids": jnp.array(snake_case__ , dtype=jnp.intaa ),
"attention_mask": jnp.array(snake_case__ , dtype=jnp.intaa ),
"start_labels": jnp.array(features["start_token"] , dtype=jnp.intaa ),
"end_labels": jnp.array(features["end_token"] , dtype=jnp.intaa ),
"pooled_labels": jnp.array(features["category"] , dtype=jnp.intaa ),
}
return batch
def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : list ):
"""simple docstring"""
__lowerCAmelCase = [self._fetch_inputs(snake_case__ ) for ids in input_ids]
return zip(*snake_case__ )
def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : list ):
"""simple docstring"""
__lowerCAmelCase = [1 for _ in range(len(snake_case__ ) )]
while len(snake_case__ ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def _UpperCAmelCase ( UpperCamelCase: Tuple , UpperCamelCase: Dict , UpperCamelCase: Optional[Any]=None ):
"""simple docstring"""
if seed is not None:
__lowerCAmelCase = dataset.shuffle(seed=UpperCamelCase )
for i in range(len(UpperCamelCase ) // batch_size ):
__lowerCAmelCase = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(UpperCamelCase )
@partial(jax.pmap , axis_name="batch" )
def _UpperCAmelCase ( UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , **UpperCamelCase: List[str] ):
"""simple docstring"""
def loss_fn(UpperCamelCase: Dict ):
__lowerCAmelCase = model_inputs.pop("start_labels" )
__lowerCAmelCase = model_inputs.pop("end_labels" )
__lowerCAmelCase = model_inputs.pop("pooled_labels" )
__lowerCAmelCase = state.apply_fn(**UpperCamelCase , params=UpperCamelCase , dropout_rng=UpperCamelCase , train=UpperCamelCase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = outputs
return state.loss_fn(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , )
__lowerCAmelCase , __lowerCAmelCase = jax.random.split(UpperCamelCase )
__lowerCAmelCase = jax.value_and_grad(UpperCamelCase )
__lowerCAmelCase , __lowerCAmelCase = grad_fn(state.params )
__lowerCAmelCase = jax.lax.pmean({"loss": loss} , axis_name="batch" )
__lowerCAmelCase = jax.lax.pmean(UpperCamelCase , "batch" )
__lowerCAmelCase = state.apply_gradients(grads=UpperCamelCase )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="batch" )
def _UpperCAmelCase ( UpperCamelCase: Optional[Any] , **UpperCamelCase: List[str] ):
"""simple docstring"""
__lowerCAmelCase = model_inputs.pop("start_labels" )
__lowerCAmelCase = model_inputs.pop("end_labels" )
__lowerCAmelCase = model_inputs.pop("pooled_labels" )
__lowerCAmelCase = state.apply_fn(**UpperCamelCase , params=state.params , train=UpperCamelCase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = outputs
__lowerCAmelCase = state.loss_fn(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
__lowerCAmelCase = jax.lax.pmean({"loss": loss} , axis_name="batch" )
return metrics
class a ( train_state.TrainState ):
lowercase_ : Callable = struct.field(pytree_node=__UpperCAmelCase )
@dataclass
class a :
lowercase_ : Args
lowercase_ : Callable
lowercase_ : Callable
lowercase_ : Callable
lowercase_ : Callable
lowercase_ : wandb
lowercase_ : Callable = None
def UpperCAmelCase__ ( self : Tuple , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : str=None ):
"""simple docstring"""
__lowerCAmelCase = model.params
__lowerCAmelCase = TrainState.create(
apply_fn=model.__call__ , params=snake_case__ , tx=snake_case__ , loss_fn=snake_case__ , )
if ckpt_dir is not None:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = restore_checkpoint(snake_case__ , snake_case__ )
__lowerCAmelCase = {
"lr": args.lr,
"init_lr": args.init_lr,
"warmup_steps": args.warmup_steps,
"num_train_steps": num_train_steps,
"weight_decay": args.weight_decay,
}
__lowerCAmelCase , __lowerCAmelCase = build_tx(**snake_case__ )
__lowerCAmelCase = train_state.TrainState(
step=snake_case__ , apply_fn=model.__call__ , params=snake_case__ , tx=snake_case__ , opt_state=snake_case__ , )
__lowerCAmelCase = args
__lowerCAmelCase = data_collator
__lowerCAmelCase = lr
__lowerCAmelCase = params
__lowerCAmelCase = jax_utils.replicate(snake_case__ )
return state
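    # Training loop: iterates over epochs, logs train/eval loss at logging_steps, and saves checkpoints at save_steps.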
def UpperCAmelCase__ ( self : str , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Any ):
"""simple docstring"""
__lowerCAmelCase = self.args
__lowerCAmelCase = len(snake_case__ ) // args.batch_size
__lowerCAmelCase = jax.random.PRNGKey(0 )
__lowerCAmelCase = jax.random.split(snake_case__ , jax.device_count() )
for epoch in range(args.max_epochs ):
__lowerCAmelCase = jnp.array(0 , dtype=jnp.floataa )
__lowerCAmelCase = get_batched_dataset(snake_case__ , args.batch_size , seed=snake_case__ )
__lowerCAmelCase = 0
for batch in tqdm(snake_case__ , total=snake_case__ , desc=F"Running EPOCH-{epoch}" ):
__lowerCAmelCase = self.data_collator(snake_case__ )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.train_step_fn(snake_case__ , snake_case__ , **snake_case__ )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
if i % args.logging_steps == 0:
__lowerCAmelCase = jax_utils.unreplicate(state.step )
__lowerCAmelCase = running_loss.item() / i
__lowerCAmelCase = self.scheduler_fn(state_step - 1 )
__lowerCAmelCase = self.evaluate(snake_case__ , snake_case__ )
__lowerCAmelCase = {
"step": state_step.item(),
"eval_loss": eval_loss.item(),
"tr_loss": tr_loss,
"lr": lr.item(),
}
tqdm.write(str(snake_case__ ) )
self.logger.log(snake_case__ , commit=snake_case__ )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F"-e{epoch}-s{i}" , state=snake_case__ )
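    # Evaluation: averages the per-batch validation loss over the whole dataset.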
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Any , snake_case__ : Optional[Any] ):
"""simple docstring"""
__lowerCAmelCase = get_batched_dataset(snake_case__ , self.args.batch_size )
__lowerCAmelCase = len(snake_case__ ) // self.args.batch_size
__lowerCAmelCase = jnp.array(0 , dtype=jnp.floataa )
__lowerCAmelCase = 0
for batch in tqdm(snake_case__ , total=snake_case__ , desc="Evaluating ... " ):
__lowerCAmelCase = self.data_collator(snake_case__ )
__lowerCAmelCase = self.val_step_fn(snake_case__ , **snake_case__ )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
return running_loss / i
def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Any ):
"""simple docstring"""
__lowerCAmelCase = jax_utils.unreplicate(snake_case__ )
print(F"SAVING CHECKPOINT IN {save_dir}" , end=" ... " )
self.model_save_fn(snake_case__ , params=state.params )
with open(os.path.join(snake_case__ , "opt_state.msgpack" ) , "wb" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(snake_case__ , "args.joblib" ) )
joblib.dump(self.data_collator , os.path.join(snake_case__ , "data_collator.joblib" ) )
with open(os.path.join(snake_case__ , "training_state.json" ) , "w" ) as f:
json.dump({"step": state.step.item()} , snake_case__ )
print("DONE" )
def _UpperCAmelCase ( UpperCamelCase: str , UpperCamelCase: List[Any] ):
"""simple docstring"""
print(F"RESTORING CHECKPOINT FROM {save_dir}" , end=" ... " )
with open(os.path.join(UpperCamelCase , "flax_model.msgpack" ) , "rb" ) as f:
__lowerCAmelCase = from_bytes(state.params , f.read() )
with open(os.path.join(UpperCamelCase , "opt_state.msgpack" ) , "rb" ) as f:
__lowerCAmelCase = from_bytes(state.opt_state , f.read() )
__lowerCAmelCase = joblib.load(os.path.join(UpperCamelCase , "args.joblib" ) )
__lowerCAmelCase = joblib.load(os.path.join(UpperCamelCase , "data_collator.joblib" ) )
with open(os.path.join(UpperCamelCase , "training_state.json" ) , "r" ) as f:
__lowerCAmelCase = json.load(UpperCamelCase )
__lowerCAmelCase = training_state["step"]
print("DONE" )
return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)

    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
| 376 | 0 |
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
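# Helper that builds a small random config and dummy inputs shared by the MPNet model tests below.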
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Tuple , a__ : Tuple , a__ : Optional[Any]=13 , a__ : Tuple=7 , a__ : Dict=True , a__ : Dict=True , a__ : List[Any]=False , a__ : int=True , a__ : Optional[Any]=99 , a__ : Union[str, Any]=64 , a__ : Optional[int]=5 , a__ : Union[str, Any]=4 , a__ : Any=64 , a__ : Optional[int]="gelu" , a__ : Optional[int]=0.1 , a__ : Dict=0.1 , a__ : Optional[Any]=512 , a__ : Optional[Any]=16 , a__ : Dict=2 , a__ : int=0.02 , a__ : int=3 , a__ : str=4 , a__ : List[str]=None , ):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_input_mask
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
def __snake_case ( self : List[str] ):
return MPNetConfig.from_pretrained('''microsoft/mpnet-base''' )
def __snake_case ( self : str ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __snake_case ( self : Union[str, Any] ):
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __snake_case ( self : List[Any] , a__ : Dict , a__ : List[Any] , a__ : List[Any] , a__ : List[str] , a__ : str , a__ : str ):
UpperCAmelCase = MPNetModel(config=a__ )
model.to(a__ )
model.eval()
UpperCAmelCase = model(a__ , a__ )
UpperCAmelCase = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __snake_case ( self : str , a__ : Tuple , a__ : List[str] , a__ : int , a__ : List[Any] , a__ : List[Any] , a__ : Union[str, Any] ):
UpperCAmelCase = MPNetForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
UpperCAmelCase = model(
a__ , attention_mask=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self : Union[str, Any] , a__ : Any , a__ : Optional[int] , a__ : Any , a__ : Union[str, Any] , a__ : int , a__ : Optional[Any] ):
UpperCAmelCase = self.num_labels
UpperCAmelCase = MPNetForSequenceClassification(a__ )
model.to(a__ )
model.eval()
UpperCAmelCase = model(a__ , attention_mask=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self : Any , a__ : int , a__ : Any , a__ : str , a__ : Dict , a__ : Tuple , a__ : List[Any] ):
UpperCAmelCase = self.num_choices
UpperCAmelCase = MPNetForMultipleChoice(config=a__ )
model.to(a__ )
model.eval()
UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = model(
a__ , attention_mask=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __snake_case ( self : List[str] , a__ : Dict , a__ : List[Any] , a__ : List[Any] , a__ : int , a__ : Dict , a__ : Any ):
UpperCAmelCase = self.num_labels
UpperCAmelCase = MPNetForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
UpperCAmelCase = model(a__ , attention_mask=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case ( self : str ):
UpperCAmelCase = self.prepare_config_and_inputs()
((UpperCAmelCase), (UpperCAmelCase), (UpperCAmelCase), (UpperCAmelCase), (UpperCAmelCase), (UpperCAmelCase)) = config_and_inputs
UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =(
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
_lowerCamelCase =(
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase =False
_lowerCamelCase =True
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = MPNetModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=a__ , hidden_size=37 )
def __snake_case ( self : Optional[int] ):
self.config_tester.run_common_tests()
def __snake_case ( self : List[str] ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*a__ )
def __snake_case ( self : Dict ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*a__ )
def __snake_case ( self : Any ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*a__ )
def __snake_case ( self : Tuple ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*a__ )
def __snake_case ( self : int ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*a__ )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __snake_case ( self : Tuple ):
UpperCAmelCase = MPNetModel.from_pretrained('''microsoft/mpnet-base''' )
UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
UpperCAmelCase = model(a__ )[0]
UpperCAmelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , a__ )
UpperCAmelCase = torch.tensor(
[[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , a__ , atol=1e-4 ) )
| 51 |
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    def test_sorted(self) -> None:
        """kp.calc_profit returns the expected total profit for a valid input."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self) -> None:
        """A negative max_weight raises a ValueError."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self) -> None:
        """A negative weight value raises a ValueError."""
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self) -> None:
        """A negative profit value raises a ValueError."""
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self) -> None:
        """A zero max_weight raises a ValueError."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self) -> None:
        """Profit and weight lists of different lengths raise an IndexError."""
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same.")


if __name__ == "__main__":
    unittest.main()
| 217 | 0 |
"""simple docstring"""
class UpperCamelCase_ :
def __init__( self : int , lowerCAmelCase_ : Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = val
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Optional[int] = None
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] ) -> List[str]:
if self.val:
if val < self.val:
if self.left is None:
UpperCAmelCase_ : Any = Node(UpperCAmelCase__ )
else:
self.left.insert(UpperCAmelCase__ )
elif val > self.val:
if self.right is None:
UpperCAmelCase_ : List[str] = Node(UpperCAmelCase__ )
else:
self.right.insert(UpperCAmelCase__ )
else:
UpperCAmelCase_ : int = val
def snake_case ( A__ ,A__ ):
if root:
inorder(root.left ,_UpperCamelCase )
res.append(root.val )
inorder(root.right ,_UpperCamelCase )
def snake_case ( A__ ):
if len(_UpperCamelCase ) == 0:
return arr
UpperCAmelCase_ : Optional[Any] = Node(arr[0] )
for i in range(1 ,len(_UpperCamelCase ) ):
root.insert(arr[i] )
# Traverse BST in order.
UpperCAmelCase_ : Union[str, Any] = []
inorder(_UpperCamelCase ,_UpperCamelCase )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 721 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_funnel_fast'''] = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_funnel'''] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_funnel'''] = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 463 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_resnet"""] = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_resnet"""] = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_resnet"""] = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 178 |
def solution(limit=28123):
    # sum_divs[n] holds the sum of the proper divisors of n.
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
if __name__ == "__main__":
print(solution())
| 364 | 0 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--base_model""", action="""store_true""", help="""Whether you want just the base model (no decoder) or not."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 718 |
'''simple docstring'''
def longest_common_subsequence(x: str, y: str):
    """Return the length of the longest common subsequence of x and y, and one such subsequence."""
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    # Backtrack through the table to recover one longest common subsequence.
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq
if __name__ == "__main__":
_lowercase = """AGGTAB"""
_lowercase = """GXTXAYB"""
_lowercase = 4
_lowercase = """GTAB"""
_lowercase , _lowercase = longest_common_subsequence(a, b)
print("""len =""", ln, """, sub-sequence =""", subseq)
import doctest
doctest.testmod()
| 162 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Compute the resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C))."""
    if inductance <= 0:
        raise ValueError('''Inductance cannot be 0 or negative''')
    elif capacitance <= 0:
        raise ValueError('''Capacitance cannot be 0 or negative''')
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72 |
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def UpperCamelCase ( lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Tuple ) -> List[Any]:
'''simple docstring'''
if isinstance(lowercase_ , lowercase_ ):
lowercase =np.full((len(lowercase_ ), sequence_length, 2) , lowercase_ )
else:
lowercase =np.full((len(lowercase_ ), sequence_length) , lowercase_ )
for i, tensor in enumerate(lowercase_ ):
if padding_side == "right":
if isinstance(lowercase_ , lowercase_ ):
lowercase =tensor[:sequence_length]
else:
lowercase =tensor[:sequence_length]
else:
if isinstance(lowercase_ , lowercase_ ):
lowercase =tensor[:sequence_length]
else:
lowercase =tensor[:sequence_length]
return out_tensor.tolist()
def UpperCamelCase ( lowercase_ : Optional[Any] ) -> str:
'''simple docstring'''
lowercase =ord(lowercase_ )
if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6):
return True
lowercase =unicodedata.category(lowercase_ )
if cat.startswith('''P''' ):
return True
return False
@dataclass
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = 42
UpperCamelCase__ = True
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = -1_00
UpperCamelCase__ = "pt"
def _A( self , snake_case_ ):
import torch
lowercase ='''label''' if '''label''' in features[0].keys() else '''labels'''
lowercase =[feature[label_name] for feature in features] if label_name in features[0].keys() else None
lowercase =self.tokenizer.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
if labels is None:
return batch
lowercase =torch.tensor(batch['''entity_ids'''] ).shape[1]
lowercase =self.tokenizer.padding_side
if padding_side == "right":
lowercase =[
list(snake_case_ ) + [self.label_pad_token_id] * (sequence_length - len(snake_case_ )) for label in labels
]
else:
lowercase =[
[self.label_pad_token_id] * (sequence_length - len(snake_case_ )) + list(snake_case_ ) for label in labels
]
lowercase =[feature['''ner_tags'''] for feature in features]
lowercase =padding_tensor(snake_case_ , -1 , snake_case_ , snake_case_ )
lowercase =[feature['''original_entity_spans'''] for feature in features]
lowercase =padding_tensor(snake_case_ , (-1, -1) , snake_case_ , snake_case_ )
lowercase ={k: torch.tensor(snake_case_ , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 72 | 1 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")
class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        # Evict the least recently used key when the cache is full.
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        # Print the cache contents from most to least recently used.
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return F'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 705 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["YolosFeatureExtractor"]
lowerCamelCase__ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 202 | 0 |
'''simple docstring'''
from jiwer import compute_measures
import datasets
a_ = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
a_ = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
a_ = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def _lowerCAmelCase ( self: Optional[int]) ->Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Value("string" , id="sequence"),
}) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
def _lowerCAmelCase ( self: int , a: List[str]=None , a: Dict=None , a: Optional[Any]=False) ->List[Any]:
'''simple docstring'''
if concatenate_texts:
return compute_measures(a , a)["wer"]
else:
a_ = 0
a_ = 0
for prediction, reference in zip(a , a):
a_ = compute_measures(a , a)
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 685 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
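# Thin wrapper that exposes a timm model through the Transformers backbone interface.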
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ ):
_UpperCAmelCase ='''pixel_values'''
_UpperCAmelCase =False
_UpperCAmelCase =TimmBackboneConfig
def __init__( self: Union[str, Any] , a: Union[str, Any] , **a: Tuple) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , "timm")
super().__init__(a)
a_ = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
if config.backbone not in timm.list_models():
raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")
if hasattr(a , "out_features") and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
a_ = getattr(a , "use_pretrained_backbone" , a)
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
# We just take the final layer by default. This matches the default for the transformers models.
a_ = config.out_indices if getattr(a , "out_indices" , a) is not None else (-1,)
a_ = timm.create_model(
config.backbone , pretrained=a , features_only=config.features_only , in_chans=config.num_channels , out_indices=a , **a , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
a_ = self._backbone.return_layers
a_ = {layer["module"]: str(a) for i, layer in enumerate(self._backbone.feature_info.info)}
super()._init_backbone(a)
@classmethod
def _lowerCAmelCase ( cls: Tuple , a: Optional[Any] , *a: Optional[Any] , **a: str) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"])
from ...models.timm_backbone import TimmBackboneConfig
a_ = kwargs.pop("config" , TimmBackboneConfig())
a_ = kwargs.pop("use_timm_backbone" , a)
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones")
a_ = kwargs.pop("num_channels" , config.num_channels)
a_ = kwargs.pop("features_only" , config.features_only)
a_ = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone)
a_ = kwargs.pop("out_indices" , config.out_indices)
a_ = TimmBackboneConfig(
backbone=a , num_channels=a , features_only=a , use_pretrained_backbone=a , out_indices=a , )
return super()._from_config(a , **a)
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
pass
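    # Forward pass delegates to the wrapped timm model; attention outputs are not supported for timm backbones.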
def _lowerCAmelCase ( self: Tuple , a: List[Any] , a: Any=None , a: Dict=None , a: Optional[int]=None , **a: int) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
a_ = return_dict if return_dict is not None else self.config.use_return_dict
a_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment")
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
a_ = self._all_layers
a_ = self._backbone(a , **a)
a_ = self._return_layers
a_ = tuple(hidden_states[i] for i in self.out_indices)
else:
a_ = self._backbone(a , **a)
a_ = None
a_ = tuple(a)
a_ = tuple(a) if hidden_states is not None else None
if not return_dict:
a_ = (feature_maps,)
if output_hidden_states:
a_ = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=a , hidden_states=a , attentions=a)
| 685 | 1 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
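# Checks that FeaturesManager.determine_framework picks the right framework from local checkpoints and the available environment.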
@require_torch
@require_tf
class UpperCAmelCase ( snake_case_ ):
def __lowerCAmelCase ( self ):
_lowerCAmelCase = SMALL_MODEL_IDENTIFIER
_lowerCAmelCase = '''pt'''
_lowerCAmelCase = '''tf'''
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(_lowerCAmelCase )
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=_lowerCAmelCase )
model_tf.save_pretrained(_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''mock_framework'''
# Framework provided - return whatever the user provides
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def __lowerCAmelCase ( self ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCAmelCase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCAmelCase , self.framework_tf )
# Both in environment -> use PyTorch
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ), patch(
'''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCAmelCase , self.framework_pt )
# Both not in environment -> raise error
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ), patch(
'''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ):
with self.assertRaises(_lowerCAmelCase ):
                _lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
| 664 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array

    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)

        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 664 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}
class A__ ( A__ ):
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_INIT_CONFIGURATION
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = MobileBertTokenizer
def __init__( self : List[str] , _a : Union[str, Any]=None , _a : List[Any]=None , _a : Union[str, Any]=True , _a : int="[UNK]" , _a : Tuple="[SEP]" , _a : Dict="[PAD]" , _a : Any="[CLS]" , _a : List[str]="[MASK]" , _a : Tuple=True , _a : Dict=None , **_a : List[Any] , ) -> str:
'''simple docstring'''
super().__init__(
_a , tokenizer_file=_a , do_lower_case=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , tokenize_chinese_chars=_a , strip_accents=_a , **_a , )
_SCREAMING_SNAKE_CASE =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _a ) != do_lower_case
or normalizer_state.get('strip_accents' , _a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _a ) != tokenize_chinese_chars
):
_SCREAMING_SNAKE_CASE =getattr(_a , normalizer_state.pop('type' ) )
_SCREAMING_SNAKE_CASE =do_lower_case
_SCREAMING_SNAKE_CASE =strip_accents
_SCREAMING_SNAKE_CASE =tokenize_chinese_chars
_SCREAMING_SNAKE_CASE =normalizer_class(**_a )
_SCREAMING_SNAKE_CASE =do_lower_case
def A ( self : Dict , _a : str , _a : Tuple=None ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =[self.sep_token_id]
_SCREAMING_SNAKE_CASE =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A ( self : Optional[int] , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
| 405 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
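# Helper that builds a small random config and dummy inputs shared by the TF RoFormer model tests below.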
class A__ :
def __init__( self : List[str] , _a : Dict , _a : Dict=13 , _a : Union[str, Any]=7 , _a : Dict=True , _a : Any=True , _a : Optional[int]=True , _a : List[Any]=True , _a : str=99 , _a : Union[str, Any]=32 , _a : List[Any]=2 , _a : Union[str, Any]=4 , _a : Dict=37 , _a : List[str]="gelu" , _a : Tuple=0.1 , _a : Optional[Any]=0.1 , _a : List[str]=512 , _a : Optional[Any]=16 , _a : List[Any]=2 , _a : int=0.02 , _a : Optional[Any]=3 , _a : Optional[Any]=4 , _a : Optional[Any]=None , ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =13
_SCREAMING_SNAKE_CASE =7
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =99
_SCREAMING_SNAKE_CASE =32
_SCREAMING_SNAKE_CASE =2
_SCREAMING_SNAKE_CASE =4
_SCREAMING_SNAKE_CASE =37
_SCREAMING_SNAKE_CASE ='gelu'
_SCREAMING_SNAKE_CASE =0.1
_SCREAMING_SNAKE_CASE =0.1
_SCREAMING_SNAKE_CASE =512
_SCREAMING_SNAKE_CASE =16
_SCREAMING_SNAKE_CASE =2
_SCREAMING_SNAKE_CASE =0.02
_SCREAMING_SNAKE_CASE =3
_SCREAMING_SNAKE_CASE =4
_SCREAMING_SNAKE_CASE =None
def A ( self : int ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE =None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE =None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : Optional[int] , _a : Dict , _a : Any , _a : Union[str, Any] , _a : Union[str, Any] , _a : List[str] , _a : str , _a : Tuple ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TFRoFormerModel(config=_a )
_SCREAMING_SNAKE_CASE ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_SCREAMING_SNAKE_CASE =[input_ids, input_mask]
_SCREAMING_SNAKE_CASE =model(_a )
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[Any] , _a : Optional[int] , _a : Tuple , _a : Any , _a : List[str] , _a : int , _a : Dict , _a : str ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =TFRoFormerForCausalLM(config=_a )
_SCREAMING_SNAKE_CASE ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_SCREAMING_SNAKE_CASE =model(_a )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A ( self : List[str] , _a : List[Any] , _a : Any , _a : List[Any] , _a : Any , _a : List[Any] , _a : Any , _a : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TFRoFormerForMaskedLM(config=_a )
_SCREAMING_SNAKE_CASE ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : List[str] , _a : List[Any] , _a : Optional[Any] , _a : Union[str, Any] , _a : Dict , _a : List[Any] , _a : str , _a : Union[str, Any] ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =TFRoFormerForSequenceClassification(config=_a )
_SCREAMING_SNAKE_CASE ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Any , _a : int , _a : str , _a : Any , _a : Tuple , _a : int , _a : List[str] , _a : Optional[int] ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.num_choices
_SCREAMING_SNAKE_CASE =TFRoFormerForMultipleChoice(config=_a )
_SCREAMING_SNAKE_CASE =tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE =tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE =tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self : Tuple , _a : Dict , _a : Any , _a : Optional[Any] , _a : int , _a : Optional[int] , _a : Union[str, Any] , _a : List[str] ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =TFRoFormerForTokenClassification(config=_a )
_SCREAMING_SNAKE_CASE ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : Optional[Any] , _a : Tuple , _a : Tuple , _a : Tuple , _a : List[Any] , _a : Any , _a : int , _a : str ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TFRoFormerForQuestionAnswering(config=_a )
_SCREAMING_SNAKE_CASE ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : List[str] ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
(
(
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) ,
) =config_and_inputs
_SCREAMING_SNAKE_CASE ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class A__ ( A__ , A__ , unittest.TestCase ):
A__ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
A__ = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
A__ = False
A__ = False
def A ( self : Any , _a : int , _a : str , _a : Dict , _a : Any , _a : Any ) -> Dict:
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def A ( self : List[str] ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TFRoFormerModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , hidden_size=37 )
def A ( self : str ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : str ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def A ( self : Optional[Any] ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def A ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*_a )
def A ( self : Dict ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_a )
def A ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a )
def A ( self : str ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_a )
def A ( self : Tuple ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
@slow
def A ( self : str ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(_a )
@require_tf
class A__ ( unittest.TestCase ):
@slow
def A ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
_SCREAMING_SNAKE_CASE =tf.constant([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE =model(_a )[0]
# TODO Replace vocab size
_SCREAMING_SNAKE_CASE =5_0000
_SCREAMING_SNAKE_CASE =[1, 6, vocab_size]
self.assertEqual(output.shape , _a )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
_SCREAMING_SNAKE_CASE =tf.constant(
[
[
[-0.12_05_33_41, -1.0_26_49_01, 0.29_22_19_46],
[-1.5_13_37_83, 0.19_74_33, 0.15_19_06_07],
[-5.0_13_54_03, -3.90_02_56, -0.84_03_87_64],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1e-4 )
@require_tf
class A__ ( unittest.TestCase ):
A__ = 1E-4
def A ( self : Dict ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =tf.constant([[4, 10]] )
_SCREAMING_SNAKE_CASE =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
_SCREAMING_SNAKE_CASE =emba(input_ids.shape )
_SCREAMING_SNAKE_CASE =tf.constant(
[[0.00_00, 0.00_00, 0.00_00, 1.00_00, 1.00_00, 1.00_00], [0.84_15, 0.04_64, 0.00_22, 0.54_03, 0.99_89, 1.00_00]] )
tf.debugging.assert_near(_a , _a , atol=self.tolerance )
def A ( self : Any ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =tf.constant(
[
[0.00_00, 0.00_00, 0.00_00, 0.00_00, 0.00_00],
[0.84_15, 0.82_19, 0.80_20, 0.78_19, 0.76_17],
[0.90_93, 0.93_64, 0.95_81, 0.97_49, 0.98_70],
] )
_SCREAMING_SNAKE_CASE =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
_SCREAMING_SNAKE_CASE =emba.weight[:3, :5]
tf.debugging.assert_near(_a , _a , atol=self.tolerance )
@require_tf
class A__ ( unittest.TestCase ):
A__ = 1E-4
def A ( self : Dict ) -> List[str]:
'''simple docstring'''
    _SCREAMING_SNAKE_CASE =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
    _SCREAMING_SNAKE_CASE =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
_SCREAMING_SNAKE_CASE =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
_SCREAMING_SNAKE_CASE =embed_positions([2, 16, 768] )[None, None, :, :]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
_a , _a , _a )
_SCREAMING_SNAKE_CASE =tf.constant(
[
[0.00_00, 0.01_00, 0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00],
[-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43],
[-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85],
[-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71],
[0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80],
[3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53],
] )
_SCREAMING_SNAKE_CASE =tf.constant(
[
[0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00],
[0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43],
[1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85],
[2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71],
[-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80],
[-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , _a , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , _a , atol=self.tolerance )
| 405 | 1 |
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest ( nn.Module ):
'''simple docstring'''
def __init__( self ) -> Any:
super().__init__()
_UpperCAmelCase = nn.Linear(3 , 4 )
        _UpperCAmelCase = nn.BatchNorm1d(4 )
_UpperCAmelCase = nn.Linear(4 , 5 )
    def forward( self , x ) -> Any:
        return self.lineara(self.batchnorm(self.lineara(x ) ) )
class PreForwardHook ( ModelHook ):
    '''simple docstring'''
    def pre_forward( self , module , *args , **kwargs ) -> int:
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook ( ModelHook ):
    '''simple docstring'''
    def post_forward( self , module , output ) -> str:
        return output + 1
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = ModelForTest()
_UpperCAmelCase = ModelHook()
add_hook_to_module(snake_case , snake_case )
self.assertEqual(test_model._hf_hook , snake_case )
self.assertTrue(hasattr(snake_case , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(snake_case )
self.assertFalse(hasattr(snake_case , '_hf_hook' ) )
self.assertFalse(hasattr(snake_case , '_old_forward' ) )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = ModelForTest()
_UpperCAmelCase = ModelHook()
add_hook_to_module(snake_case , snake_case )
add_hook_to_module(snake_case , snake_case , append=snake_case )
self.assertEqual(isinstance(test_model._hf_hook , snake_case ) , snake_case )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(snake_case , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(snake_case )
self.assertFalse(hasattr(snake_case , '_hf_hook' ) )
self.assertFalse(hasattr(snake_case , '_old_forward' ) )
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = ModelForTest()
_UpperCAmelCase = torch.randn(2 , 3 )
_UpperCAmelCase = test_model(x + 1 )
_UpperCAmelCase = test_model(x + 2 )
_UpperCAmelCase = PreForwardHook()
add_hook_to_module(snake_case , snake_case )
_UpperCAmelCase = test_model(snake_case )
self.assertTrue(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_UpperCAmelCase = PreForwardHook()
add_hook_to_module(snake_case , snake_case )
_UpperCAmelCase = test_model(snake_case )
self.assertTrue(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_UpperCAmelCase = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(snake_case , snake_case )
_UpperCAmelCase = test_model(snake_case )
assert torch.allclose(snake_case , snake_case , atol=1E-5 )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = ModelForTest()
_UpperCAmelCase = torch.randn(2 , 3 )
_UpperCAmelCase = test_model(snake_case )
_UpperCAmelCase = PostForwardHook()
add_hook_to_module(snake_case , snake_case )
_UpperCAmelCase = test_model(snake_case )
self.assertTrue(torch.allclose(snake_case , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_UpperCAmelCase = PostForwardHook()
add_hook_to_module(snake_case , snake_case )
_UpperCAmelCase = test_model(snake_case )
self.assertTrue(torch.allclose(snake_case , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_UpperCAmelCase = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(snake_case , snake_case )
_UpperCAmelCase = test_model(snake_case )
assert torch.allclose(snake_case , output + 2 , atol=1E-5 )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = ModelForTest()
_UpperCAmelCase = torch.randn(2 , 3 )
_UpperCAmelCase = test_model(snake_case )
_UpperCAmelCase = PostForwardHook()
add_hook_to_module(snake_case , snake_case )
_UpperCAmelCase = test_model(snake_case )
self.assertTrue(torch.allclose(snake_case , output + 1 ) )
self.assertTrue(outputa.requires_grad )
_UpperCAmelCase = True
_UpperCAmelCase = test_model(snake_case )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_UpperCAmelCase = torch.randn(2 , 3 )
_UpperCAmelCase = model(snake_case )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(snake_case , AlignDevicesHook(io_same_device=snake_case ) )
_UpperCAmelCase = torch.randn(2 , 3 ).to(0 )
_UpperCAmelCase = model(snake_case )
self.assertEqual(output.device , torch.device(0 ) )
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
_UpperCAmelCase = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**snake_case ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
_UpperCAmelCase = torch.device(hook_kwargs['execution_device'] )
self.assertEqual(model.batchnorm.running_mean.device , snake_case )
_UpperCAmelCase = torch.randn(2 , 3 )
_UpperCAmelCase = model(snake_case )
self.assertEqual(output.device , snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
_UpperCAmelCase = {
'execution_device': 0 if torch.cuda.is_available() else 'cpu',
'offload': True,
'offload_buffers': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**snake_case ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
_UpperCAmelCase = torch.randn(2 , 3 )
_UpperCAmelCase = model(snake_case )
self.assertEqual(output.device , snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
_UpperCAmelCase = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(snake_case , execution_device=snake_case , offload=snake_case )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
_UpperCAmelCase = torch.device(snake_case )
self.assertEqual(model.batchnorm.running_mean.device , snake_case )
_UpperCAmelCase = torch.randn(2 , 3 )
_UpperCAmelCase = model(snake_case )
self.assertEqual(output.device , snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(snake_case )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(snake_case , execution_device=snake_case , offload=snake_case , offload_buffers=snake_case )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
_UpperCAmelCase = torch.randn(2 , 3 )
_UpperCAmelCase = model(snake_case )
self.assertEqual(output.device , snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(snake_case )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
_UpperCAmelCase = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(
snake_case , execution_device=snake_case , offload=snake_case , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
_UpperCAmelCase = torch.device(snake_case )
self.assertEqual(model.batchnorm.running_mean.device , snake_case )
_UpperCAmelCase = torch.randn(2 , 3 )
_UpperCAmelCase = model(snake_case )
self.assertEqual(output.device , snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(snake_case )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
snake_case , execution_device=snake_case , offload=snake_case , weights_map=model.state_dict() , offload_buffers=snake_case , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
_UpperCAmelCase = torch.randn(2 , 3 )
_UpperCAmelCase = model(snake_case )
self.assertEqual(output.device , snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(snake_case )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
| 705 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase = logging.get_logger(__name__)
lowercase = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowercase__ ( A, A ):
'''simple docstring'''
_UpperCAmelCase = '''swin'''
_UpperCAmelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , snake_case=224 , snake_case=4 , snake_case=3 , snake_case=96 , snake_case=[2, 2, 6, 2] , snake_case=[3, 6, 12, 24] , snake_case=7 , snake_case=4.0 , snake_case=True , snake_case=0.0 , snake_case=0.0 , snake_case=0.1 , snake_case="gelu" , snake_case=False , snake_case=0.02 , snake_case=1E-5 , snake_case=32 , snake_case=None , snake_case=None , **snake_case , ) -> List[Any]:
super().__init__(**snake_case )
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = len(snake_case )
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_UpperCAmelCase = int(embed_dim * 2 ** (len(snake_case ) - 1) )
_UpperCAmelCase = ['stem'] + [f'stage{idx}' for idx in range(1 , len(snake_case ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=snake_case , out_indices=snake_case , stage_names=self.stage_names )
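        # Worked example with the defaults above (added for illustration): embed_dim=96 and
        # depths=[2, 2, 6, 2] give hidden_size = 96 * 2 ** (4 - 1) = 768, the channel dimension
        # after the last stage of the swin-tiny checkpoint referenced at the top of this file.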
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = version.parse('''1.11''' )
@property
def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase_ ( self ) -> float:
return 1E-4
| 24 | 0 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
lowerCAmelCase__ : Dict =None
lowerCAmelCase__ : Optional[int] ={
    '7B': 11_008,
    '13B': 13_824,
    '30B': 17_920,
    '65B': 22_016,
    '70B': 28_672,
}
lowerCAmelCase__ : List[str] ={
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def compute_intermediate_size( n, ffn_dim_multiplier=1, multiple_of=256 ):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
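# Quick sanity check of the rounding above (illustrative, not part of the original script):
# for the 7B model, n=4096 gives int(8 * 4096 / 3) = 10922, which rounds up to the next
# multiple of 256, i.e. compute_intermediate_size(4096) == 11008 -- matching the "7B" entry
# in the intermediate-size table near the top of this file.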
def read_json( path ):
    with open(path, 'r' ) as f:
        return json.load(f )
def write_json( text, path ):
    with open(path, 'w' ) as f:
        json.dump(text, f )
def write_model( model_path, input_base_path, model_size, safe_serialization=True ):
os.makedirs(__lowerCamelCase, exist_ok=__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Dict = os.path.join(__lowerCamelCase, 'tmp' )
os.makedirs(__lowerCamelCase, exist_ok=__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : str = read_json(os.path.join(__lowerCamelCase, 'params.json' ) )
SCREAMING_SNAKE_CASE_ : Optional[int] = NUM_SHARDS[model_size]
SCREAMING_SNAKE_CASE_ : int = params['n_layers']
SCREAMING_SNAKE_CASE_ : Dict = params['n_heads']
SCREAMING_SNAKE_CASE_ : Tuple = n_heads // num_shards
SCREAMING_SNAKE_CASE_ : int = params['dim']
SCREAMING_SNAKE_CASE_ : Dict = dim // n_heads
SCREAMING_SNAKE_CASE_ : List[Any] = 1_0_0_0_0.0
SCREAMING_SNAKE_CASE_ : List[Any] = 1.0 / (base ** (torch.arange(0, __lowerCamelCase, 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
SCREAMING_SNAKE_CASE_ : Optional[Any] = params['n_kv_heads'] # for GQA / MQA
SCREAMING_SNAKE_CASE_ : List[str] = n_heads_per_shard // num_key_value_heads
SCREAMING_SNAKE_CASE_ : Any = dim // num_key_value_heads
else: # compatibility with other checkpoints
SCREAMING_SNAKE_CASE_ : Dict = n_heads
SCREAMING_SNAKE_CASE_ : List[Any] = n_heads_per_shard
SCREAMING_SNAKE_CASE_ : Optional[Any] = dim
# permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim ):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2 ).transpose(1, 2 ).reshape(dim1, dim2 )
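    # Note added for clarity: the original checkpoints store the rotary q/k projection rows with
    # the two halves of each head interleaved; the view/transpose/reshape above regroups each head
    # so its first and second halves are contiguous, the layout the Transformers RoPE code expects.
    # The overall (dim1 x dim2) shape is unchanged.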
print(F'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.load(os.path.join(__lowerCamelCase, 'consolidated.00.pth' ), map_location='cpu' )
else:
# Sharded
SCREAMING_SNAKE_CASE_ : List[str] = [
torch.load(os.path.join(__lowerCamelCase, F'''consolidated.{i:02d}.pth''' ), map_location='cpu' )
for i in range(__lowerCamelCase )
]
SCREAMING_SNAKE_CASE_ : Any = 0
SCREAMING_SNAKE_CASE_ : Optional[int] = {'weight_map': {}}
for layer_i in range(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = F'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
F'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[F'''layers.{layer_i}.attention.wq.weight'''] ),
F'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[F'''layers.{layer_i}.attention.wk.weight'''] ),
F'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[F'''layers.{layer_i}.attention.wv.weight'''],
F'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[F'''layers.{layer_i}.attention.wo.weight'''],
F'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w1.weight'''],
F'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w2.weight'''],
F'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w3.weight'''],
F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[F'''layers.{layer_i}.attention_norm.weight'''],
F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[F'''layers.{layer_i}.ffn_norm.weight'''],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
SCREAMING_SNAKE_CASE_ : Tuple = {
F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
F'''layers.{layer_i}.attention_norm.weight'''
].clone(),
F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
F'''layers.{layer_i}.ffn_norm.weight'''
].clone(),
}
SCREAMING_SNAKE_CASE_ : str = permute(
torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wq.weight'''].view(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
for i in range(__lowerCamelCase )
], dim=0, ).reshape(__lowerCamelCase, __lowerCamelCase ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = permute(
torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wk.weight'''].view(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
for i in range(__lowerCamelCase )
], dim=0, ).reshape(__lowerCamelCase, __lowerCamelCase ), __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wv.weight'''].view(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
for i in range(__lowerCamelCase )
], dim=0, ).reshape(__lowerCamelCase, __lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Any = torch.cat(
[loaded[i][F'''layers.{layer_i}.attention.wo.weight'''] for i in range(__lowerCamelCase )], dim=1 )
SCREAMING_SNAKE_CASE_ : Tuple = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(__lowerCamelCase )], dim=0 )
SCREAMING_SNAKE_CASE_ : str = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(__lowerCamelCase )], dim=1 )
SCREAMING_SNAKE_CASE_ : int = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(__lowerCamelCase )], dim=0 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = inv_freq
for k, v in state_dict.items():
SCREAMING_SNAKE_CASE_ : Tuple = filename
param_count += v.numel()
torch.save(__lowerCamelCase, os.path.join(__lowerCamelCase, __lowerCamelCase ) )
SCREAMING_SNAKE_CASE_ : List[str] = F'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
SCREAMING_SNAKE_CASE_ : Dict = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
SCREAMING_SNAKE_CASE_ : Any = {
'model.norm.weight': loaded[0]['norm.weight'],
'model.embed_tokens.weight': torch.cat(
[loaded[i]['tok_embeddings.weight'] for i in range(__lowerCamelCase )], dim=1 ),
'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(__lowerCamelCase )], dim=0 ),
}
for k, v in state_dict.items():
SCREAMING_SNAKE_CASE_ : int = filename
param_count += v.numel()
torch.save(__lowerCamelCase, os.path.join(__lowerCamelCase, __lowerCamelCase ) )
# Write configs
SCREAMING_SNAKE_CASE_ : List[str] = {'total_size': param_count * 2}
write_json(__lowerCamelCase, os.path.join(__lowerCamelCase, 'pytorch_model.bin.index.json' ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
SCREAMING_SNAKE_CASE_ : int = params['multiple_of'] if 'multiple_of' in params else 2_5_6
SCREAMING_SNAKE_CASE_ : List[Any] = LlamaConfig(
hidden_size=__lowerCamelCase, intermediate_size=compute_intermediate_size(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ), num_attention_heads=params['n_heads'], num_hidden_layers=params['n_layers'], rms_norm_eps=params['norm_eps'], num_key_value_heads=__lowerCamelCase, )
config.save_pretrained(__lowerCamelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.' )
SCREAMING_SNAKE_CASE_ : int = LlamaForCausalLM.from_pretrained(__lowerCamelCase, torch_dtype=torch.floataa, low_cpu_mem_usage=__lowerCamelCase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.' )
model.save_pretrained(__lowerCamelCase, safe_serialization=__lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
def write_tokenizer( tokenizer_path, input_tokenizer_path ):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(F'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' )
    tokenizer = tokenizer_class(input_tokenizer_path )
    tokenizer.save_pretrained(tokenizer_path )
def main( ):
    parser = argparse.ArgumentParser()
parser.add_argument(
'--input_dir', help='Location of LLaMA weights, which contains tokenizer.model and model folders', )
parser.add_argument(
'--model_size', choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'], )
parser.add_argument(
'--output_dir', help='Location to write HF model and tokenizer', )
    parser.add_argument('--safe_serialization', type=bool, help='Whether or not to save using `safetensors`.' )
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir, input_base_path=os.path.join(args.input_dir, args.model_size ), model_size=args.model_size, safe_serialization=args.safe_serialization, )
    spm_path = os.path.join(args.input_dir, 'tokenizer.model' )
    write_tokenizer(args.output_dir, spm_path )
if __name__ == "__main__":
main()
| 101 |
'''simple docstring'''
from manim import *
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = Rectangle(height=0.5 , width=0.5 )
_lowerCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = Text("""CPU""" , font_size=24 )
_lowerCAmelCase = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowercase )
_lowerCAmelCase = [mem.copy() for i in range(1 )]
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = Text("""GPU""" , font_size=24 )
_lowerCAmelCase = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
gpu.align_to(_lowercase , _lowercase )
gpu.set_x(gpu.get_x() - 1 )
self.add(_lowercase )
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = Text("""Model""" , font_size=24 )
_lowerCAmelCase = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
model.move_to([3, -1.0, 0] )
self.play(
Create(_lowercase , run_time=1 ) , Create(_lowercase , run_time=1 ) , Create(_lowercase , run_time=1 ) , )
_lowerCAmelCase = MarkupText(
F'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , )
_lowerCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCAmelCase = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowercase , run_time=2.5 ) , Write(_lowercase ) , Write(_lowercase ) )
self.add(_lowercase )
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
for i, rect in enumerate(_lowercase ):
_lowerCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowercase , opacity=0.7 )
cpu_target.move_to(_lowercase )
cpu_target.generate_target()
_lowerCAmelCase = 0.46 / 4
_lowerCAmelCase = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowercase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=_lowercase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_lowercase , buff=0.0 )
cpu_targs.append(_lowercase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowercase ) )
second_animations.append(MoveToTarget(_lowercase , run_time=1.5 ) )
self.play(*_lowercase )
self.play(*_lowercase )
self.wait()
| 5 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A : Optional[Any] = logging.get_logger(__name__)
__A : Union[str, Any] = """▁"""
__A : Optional[int] = {"""vocab_file""": """sentencepiece.bpe.model"""}
__A : Optional[int] = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
__A : Any = {
"""xlm-roberta-base""": 5_12,
"""xlm-roberta-large""": 5_12,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_12,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_12,
"""xlm-roberta-large-finetuned-conll03-english""": 5_12,
"""xlm-roberta-large-finetuned-conll03-german""": 5_12,
}
class lowercase ( _lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
def __init__( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]="<s>" , __lowerCamelCase : Optional[Any]="</s>" , __lowerCamelCase : Optional[int]="</s>" , __lowerCamelCase : int="<s>" , __lowerCamelCase : int="<unk>" , __lowerCamelCase : int="<pad>" , __lowerCamelCase : str="<mask>" , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : str , ) -> None:
'''simple docstring'''
lowerCamelCase__ = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCamelCase ) )
lowerCamelCase__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCamelCase__ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCamelCase__ = 1
lowerCamelCase__ = len(self.sp_model ) + self.fairseq_offset
lowerCamelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
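        # Illustrative example of the offset logic above: the spm piece "," has id 3 in the
        # SentencePiece model, so its fairseq-aligned id becomes 3 + self.fairseq_offset = 4,
        # while ids 0-3 stay reserved for <s>, <pad>, </s> and <unk> as in the table above.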
def __getstate__( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = self.__dict__.copy()
lowerCamelCase__ = None
lowerCamelCase__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : int , __lowerCamelCase : List[str] ) -> str:
'''simple docstring'''
lowerCamelCase__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase__ = {}
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def a__ ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
lowerCamelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a__ ( self : Optional[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def a__ ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def a__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def a__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__ ( self : Dict , __lowerCamelCase : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def a__ ( self : int , __lowerCamelCase : Dict ) -> List[Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase__ = self.sp_model.PieceToId(__lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def a__ ( self : Union[str, Any] , __lowerCamelCase : Tuple ) -> List[Any]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def a__ ( self : List[str] , __lowerCamelCase : List[str] ) -> Dict:
'''simple docstring'''
lowerCamelCase__ = "".join(__lowerCamelCase ).replace(__lowerCamelCase , " " ).strip()
return out_string
def a__ ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
lowerCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
| 187 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class lowercase ( _lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = "char"
lowerCAmelCase__ = "bpe"
lowerCAmelCase__ = "wp"
__A : Any = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class lowercase ( _lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["image_processor", "char_tokenizer"]
lowerCAmelCase__ = "ViTImageProcessor"
lowerCAmelCase__ = "MgpstrTokenizer"
def __init__( self : Any , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : List[str]=None , **__lowerCamelCase : Any ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __lowerCamelCase , )
lowerCamelCase__ = kwargs.pop("feature_extractor" )
lowerCamelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
lowerCamelCase__ = tokenizer
lowerCamelCase__ = AutoTokenizer.from_pretrained("gpt2" )
lowerCamelCase__ = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(__lowerCamelCase , __lowerCamelCase )
def __call__( self : List[str] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : str=None , **__lowerCamelCase : Optional[Any] ) -> str:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
lowerCamelCase__ = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
if text is not None:
lowerCamelCase__ = self.char_tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCamelCase__ = encodings["input_ids"]
return inputs
def a__ ( self : Any , __lowerCamelCase : Tuple ) -> Tuple:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = sequences
lowerCamelCase__ = char_preds.size(0 )
lowerCamelCase__ , lowerCamelCase__ = self._decode_helper(__lowerCamelCase , "char" )
lowerCamelCase__ , lowerCamelCase__ = self._decode_helper(__lowerCamelCase , "bpe" )
lowerCamelCase__ , lowerCamelCase__ = self._decode_helper(__lowerCamelCase , "wp" )
lowerCamelCase__ = []
lowerCamelCase__ = []
for i in range(__lowerCamelCase ):
lowerCamelCase__ = [char_scores[i], bpe_scores[i], wp_scores[i]]
lowerCamelCase__ = [char_strs[i], bpe_strs[i], wp_strs[i]]
lowerCamelCase__ = scores.index(max(__lowerCamelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
lowerCamelCase__ = {}
lowerCamelCase__ = final_strs
lowerCamelCase__ = final_scores
lowerCamelCase__ = char_strs
lowerCamelCase__ = bpe_strs
lowerCamelCase__ = wp_strs
return out
def a__ ( self : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any ) -> Optional[int]:
'''simple docstring'''
if format == DecodeType.CHARACTER:
lowerCamelCase__ = self.char_decode
lowerCamelCase__ = 1
lowerCamelCase__ = "[s]"
elif format == DecodeType.BPE:
lowerCamelCase__ = self.bpe_decode
lowerCamelCase__ = 2
lowerCamelCase__ = "#"
elif format == DecodeType.WORDPIECE:
lowerCamelCase__ = self.wp_decode
lowerCamelCase__ = 102
lowerCamelCase__ = "[SEP]"
else:
raise ValueError(f'''Format {format} is not supported.''' )
lowerCamelCase__ , lowerCamelCase__ = [], []
lowerCamelCase__ = pred_logits.size(0 )
lowerCamelCase__ = pred_logits.size(1 )
lowerCamelCase__ , lowerCamelCase__ = pred_logits.topk(1 , dim=-1 , largest=__lowerCamelCase , sorted=__lowerCamelCase )
lowerCamelCase__ = preds_index.view(-1 , __lowerCamelCase )[:, 1:]
lowerCamelCase__ = decoder(__lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ = torch.nn.functional.softmax(__lowerCamelCase , dim=2 ).max(dim=2 )
lowerCamelCase__ = preds_max_prob[:, 1:]
for index in range(__lowerCamelCase ):
lowerCamelCase__ = preds_str[index].find(__lowerCamelCase )
lowerCamelCase__ = preds_str[index][:pred_eos]
lowerCamelCase__ = preds_index[index].cpu().tolist()
lowerCamelCase__ = pred_index.index(__lowerCamelCase ) if eos_token in pred_index else -1
lowerCamelCase__ = preds_max_prob[index][: pred_eos_index + 1]
lowerCamelCase__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__lowerCamelCase )
conf_scores.append(__lowerCamelCase )
return dec_strs, conf_scores
def a__ ( self : Any , __lowerCamelCase : Tuple ) -> List[str]:
'''simple docstring'''
lowerCamelCase__ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(__lowerCamelCase )]
return decode_strs
def a__ ( self : Optional[Any] , __lowerCamelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(__lowerCamelCase )
def a__ ( self : Union[str, Any] , __lowerCamelCase : int ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(__lowerCamelCase )]
return decode_strs
| 187 | 1 |
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __A ( a_ :bytes , a_ :int) -> np.array:
__a : str = F"""{sampling_rate}"""
__a : Tuple = '''1'''
__a : Optional[int] = '''f32le'''
__a : Union[str, Any] = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(a_ , stdin=subprocess.PIPE , stdout=subprocess.PIPE) as ffmpeg_process:
__a : List[str] = ffmpeg_process.communicate(a_)
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''') from error
__a : List[Any] = output_stream[0]
    __a : Union[str, Any] = np.frombuffer(a_ , np.float32)
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''')
return audio
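# Byte-to-sample arithmetic for the f32le output above (illustrative numbers): each sample is a
# 4-byte float32, so a 1 second clip decoded at sampling_rate=16000 yields 64000 bytes, i.e. a
# 16000-element float32 array.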
def __A ( a_ :int , a_ :float , a_ :str = "f32le" , ) -> Tuple:
__a : int = F"""{sampling_rate}"""
__a : List[Any] = '''1'''
if format_for_conversion == "s16le":
__a : Dict = 2
elif format_for_conversion == "f32le":
__a : List[Any] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""")
__a : List[Any] = platform.system()
if system == "Linux":
__a : List[Any] = '''alsa'''
__a : str = '''default'''
elif system == "Darwin":
__a : List[str] = '''avfoundation'''
__a : List[str] = ''':0'''
elif system == "Windows":
__a : Any = '''dshow'''
__a : List[str] = '''default'''
__a : Dict = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
__a : Union[str, Any] = int(round(sampling_rate * chunk_length_s)) * size_of_sample
__a : Optional[int] = _ffmpeg_stream(a_ , a_)
for item in iterator:
yield item
def __A ( a_ :int , a_ :float , a_ :Optional[int] = None , a_ :Optional[Union[Tuple[float, float], float]] = None , a_ :str = "f32le" , ) -> Optional[Any]:
if stream_chunk_s is not None:
__a : Optional[int] = stream_chunk_s
else:
__a : Optional[Any] = chunk_length_s
__a : Union[str, Any] = ffmpeg_microphone(a_ , a_ , format_for_conversion=a_)
if format_for_conversion == "s16le":
        __a : Tuple = np.int16
__a : Optional[int] = 2
elif format_for_conversion == "f32le":
        __a : str = np.float32
__a : int = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""")
if stride_length_s is None:
__a : Tuple = chunk_length_s / 6
__a : Tuple = int(round(sampling_rate * chunk_length_s)) * size_of_sample
if isinstance(a_ , (int, float)):
__a : Optional[int] = [stride_length_s, stride_length_s]
__a : List[Any] = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
__a : str = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
__a : Dict = datetime.datetime.now()
__a : List[Any] = datetime.timedelta(seconds=a_)
for item in chunk_bytes_iter(a_ , a_ , stride=(stride_left, stride_right) , stream=a_):
# Put everything back in numpy scale
__a : int = np.frombuffer(item['''raw'''] , dtype=a_)
__a : str = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
__a : List[str] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def __A ( a_ :List[Any] , a_ :int , a_ :Tuple[int, int] , a_ :bool = False) -> List[Any]:
__a : Dict = b''''''
__a , __a : Union[str, Any] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""")
__a : Any = 0
for raw in iterator:
acc += raw
if stream and len(a_) < chunk_len:
__a : Optional[Any] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(a_) >= chunk_len:
# We are flushing the accumulator
__a : Tuple = (_stride_left, stride_right)
__a : Optional[Any] = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
__a : Any = False
yield item
__a : Dict = stride_left
__a : Dict = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(a_) > stride_left:
__a : Union[str, Any] = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
__a : Tuple = False
yield item
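# How the chunking above behaves (explanatory note, values illustrative): with chunk_len=10 and
# stride=(2, 3), every yielded chunk is 10 bytes long while the accumulator only advances by
# chunk_len - stride_left - stride_right = 5 bytes, so consecutive chunks overlap and downstream
# code can use the reported "stride" to drop the duplicated edges after inference.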
def __A ( a_ :List[str] , a_ :int) -> List[str]:
__a : List[Any] = 2**24 # 16Mo
try:
with subprocess.Popen(a_ , stdout=subprocess.PIPE , bufsize=a_) as ffmpeg_process:
while True:
__a : Any = ffmpeg_process.stdout.read(a_)
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''') from error | 52 |
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
_UpperCAmelCase = input('Enter image url: ').strip()
print(f'''Downloading image from {url} ...''')
_UpperCAmelCase = BeautifulSoup(requests.get(url).content, 'html.parser')
# The image URL is in the content field of the first meta tag with property og:image
_UpperCAmelCase = soup.find('meta', {'property': 'og:image'})['content']
_UpperCAmelCase = requests.get(image_url).content
_UpperCAmelCase = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
with open(file_name, 'wb') as fp:
fp.write(image_data)
print(f'''Done. Image saved to disk as {file_name}.''') | 504 | 0 |
NUMBERS_PLUS_LETTER = '''Input must be a string of 8 numbers plus letter'''
LOOKUP_LETTERS = '''TRWAGMYFPDXBNJZSQVHLCKE'''
def UpperCamelCase ( spanish_id ):
    if not isinstance(spanish_id , str):
        error_message = F'''Expected string as input, found {type(spanish_id).__name__}'''
        raise TypeError(error_message)
    spanish_id_clean = spanish_id.replace("-" , "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)
    return letter == LOOKUP_LETTERS[number % 23]
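# Usage sketch (illustrative digits): 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so
# UpperCamelCase("12345678Z") and UpperCamelCase("12345678-Z") both return True, while
# UpperCamelCase("12345678A") returns False.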
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_git'''] = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 683 | 0 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
_SCREAMING_SNAKE_CASE = tf.data.AUTOTUNE
def _snake_case () -> int:
    parser = argparse.ArgumentParser(description='Train a masked language model on TPU.')
parser.add_argument(
'--pretrained_model_config' , type=_snake_case , default='roberta-base' , help='The model config to use. Note that we don\'t copy the model\'s weights, only the config!' , )
parser.add_argument(
'--tokenizer' , type=_snake_case , default='unigram-tokenizer-wikitext' , help='The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.' , )
parser.add_argument(
'--per_replica_batch_size' , type=_snake_case , default=8 , help='Batch size per TPU core.' , )
parser.add_argument(
'--no_tpu' , action='store_true' , help='If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.' , )
parser.add_argument(
'--tpu_name' , type=_snake_case , help='Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.' , default='local' , )
parser.add_argument(
'--tpu_zone' , type=_snake_case , help='Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.' , )
parser.add_argument(
'--gcp_project' , type=_snake_case , help='Google cloud project name. Only used for non-Colab TPU nodes.')
parser.add_argument(
'--bfloat16' , action='store_true' , help='Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.' , )
parser.add_argument(
'--train_dataset' , type=_snake_case , help='Path to training dataset to load. If the path begins with `gs://`'
' then the dataset will be loaded from a Google Cloud Storage bucket.' , )
parser.add_argument(
'--shuffle_buffer_size' , type=_snake_case , default=2**18 , help='Size of the shuffle buffer (in samples)' , )
parser.add_argument(
'--eval_dataset' , type=_snake_case , help='Path to evaluation dataset to load. If the path begins with `gs://`'
' then the dataset will be loaded from a Google Cloud Storage bucket.' , )
parser.add_argument(
'--num_epochs' , type=_snake_case , default=1 , help='Number of epochs to train for.' , )
parser.add_argument(
'--learning_rate' , type=_snake_case , default=1E-4 , help='Learning rate to use for training.' , )
parser.add_argument(
'--weight_decay_rate' , type=_snake_case , default=1E-3 , help='Weight decay rate to use for training.' , )
parser.add_argument(
'--max_length' , type=_snake_case , default=512 , help='Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py' , )
parser.add_argument(
'--mlm_probability' , type=_snake_case , default=0.15 , help='Fraction of tokens to mask during training.' , )
parser.add_argument('--output_dir' , type=_snake_case , required=_snake_case , help='Path to save model checkpoints to.')
parser.add_argument('--hub_model_id' , type=_snake_case , help='Model ID to upload to on the Hugging Face Hub.')
    args = parser.parse_args()
return args
def initialize_tpu (args : List[Any]) -> Union[str, Any]:
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name , zone=args.tpu_zone , project=args.gcp_project)
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            'Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '
            '--gcp_project. When running on a TPU VM, use --tpu_name local.')
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples (file_list : List[str]) -> int:
    num_samples = 0
    for file in file_list:
        file_name = file.split('/')[-1]
        sample_count = re.search(R'-\d+-(\d+)\.tfrecord' , file_name).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=tf.data.AUTOTUNE)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=tf.data.AUTOTUNE)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=tf.data.AUTOTUNE)
    dataset = dataset.prefetch(tf.data.AUTOTUNE)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device='/gpu:0')
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, '*.tfrecord'))
    if not training_records:
        raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''')
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, '*.tfrecord'))
    if not eval_records:
        raise ValueError(f'''No .tfrecord files found in {args.eval_dataset}.''')
    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=['accuracy'])
    def decode_fn(example):
        features = {
            'input_ids': tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            'attention_mask': tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)
    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors='tf')
    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch['attention_mask'], tf.bool)
            | (batch['input_ids'] == tokenizer.cls_token_id)
            | (batch['input_ids'] == tokenizer.sep_token_id)
        )
        batch['input_ids'], batch['labels'] = data_collator.tf_mask_tokens(
            batch['input_ids'], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask, )
        return batch
    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, )
    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False, )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer))
    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, )
    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
main(args)
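# Example invocation (the dataset paths and model/tokenizer names below are illustrative placeholders,
# not values taken from this script):
#   python run_mlm.py --train_dataset gs://my-bucket/train/ --eval_dataset gs://my-bucket/eval/ \
#       --tokenizer unigram-tokenizer-wikitext --pretrained_model_config roberta-base \
#       --output_dir ./mlm_model --bfloat16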
| 181 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    """simple docstring"""
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = 'gelu'
def __init__( self :Optional[int], snake_case :Union[str, Any], snake_case :str=13, snake_case :Tuple=7, snake_case :str=True, snake_case :Dict=False, snake_case :List[Any]=99, snake_case :Any=32, snake_case :Tuple=2, snake_case :Optional[Any]=4, snake_case :List[str]=37, snake_case :str=0.1, snake_case :Any=0.1, snake_case :str=40, snake_case :str=2, snake_case :Union[str, Any]=1, snake_case :Tuple=0, ):
"""simple docstring"""
_lowercase =parent
_lowercase =batch_size
_lowercase =seq_length
_lowercase =is_training
_lowercase =use_labels
_lowercase =vocab_size
_lowercase =hidden_size
_lowercase =num_hidden_layers
_lowercase =num_attention_heads
_lowercase =intermediate_size
_lowercase =hidden_dropout_prob
_lowercase =attention_probs_dropout_prob
_lowercase =max_position_embeddings
_lowercase =eos_token_id
_lowercase =pad_token_id
_lowercase =bos_token_id
def UpperCamelCase__ ( self :Optional[int]):
"""simple docstring"""
_lowercase =ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
_lowercase =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
_lowercase =tf.concat([input_ids, eos_tensor], axis=1)
_lowercase =ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowercase =self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
_lowercase =prepare_pegasus_inputs_dict(snake_case, snake_case, snake_case)
return config, inputs_dict
def UpperCamelCase__ ( self :int, snake_case :int, snake_case :Union[str, Any]):
"""simple docstring"""
_lowercase =TFPegasusModel(config=snake_case).get_decoder()
_lowercase =inputs_dict['input_ids']
_lowercase =input_ids[:1, :]
_lowercase =inputs_dict['attention_mask'][:1, :]
_lowercase =inputs_dict['head_mask']
_lowercase =1
# first forward pass
_lowercase =model(snake_case, attention_mask=snake_case, head_mask=snake_case, use_cache=snake_case)
_lowercase , _lowercase =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
# append to next input_ids and
_lowercase =tf.concat([input_ids, next_tokens], axis=-1)
_lowercase =tf.concat([attention_mask, next_attn_mask], axis=-1)
_lowercase =model(snake_case, attention_mask=snake_case)[0]
_lowercase =model(snake_case, attention_mask=snake_case, past_key_values=snake_case)[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
# select random slice
_lowercase =int(ids_tensor((1,), output_from_past.shape[-1]))
_lowercase =output_from_no_past[:, -3:, random_slice_idx]
_lowercase =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case, snake_case, rtol=1e-3)
def _snake_case (_snake_case : str , _snake_case : Optional[Any] , _snake_case : Tuple , _snake_case : Any=None , _snake_case : Dict=None , _snake_case : Optional[Any]=None , _snake_case : Optional[int]=None , _snake_case : str=None , ) -> List[str]:
if attention_mask is None:
_lowercase =tf.cast(tf.math.not_equal(_snake_case , config.pad_token_id) , tf.inta)
if decoder_attention_mask is None:
_lowercase =tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id) , tf.inta),
] , axis=-1 , )
if head_mask is None:
_lowercase =tf.ones((config.encoder_layers, config.encoder_attention_heads))
if decoder_head_mask is None:
_lowercase =tf.ones((config.decoder_layers, config.decoder_attention_heads))
if cross_attn_head_mask is None:
_lowercase =tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
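# Illustrative note (not part of the original test): with config.pad_token_id == 0, an input_ids batch of
# [[5, 6, 2, 0]] gets a default attention_mask of [[1, 1, 1, 0]] from the helper above, i.e. only the
# padding position is masked out.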
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFPegasusForConditionalGeneration,
            'feature-extraction': TFPegasusModel,
            'summarization': TFPegasusForConditionalGeneration,
            'text2text-generation': TFPegasusForConditionalGeneration,
            'translation': TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
"""simple docstring"""
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = 'google/pegasus-xsum'
@cached_property
def UpperCamelCase__ ( self :Optional[Any]):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name)
@cached_property
def UpperCamelCase__ ( self :Dict):
"""simple docstring"""
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
return model
def UpperCamelCase__ ( self :Any, **snake_case :int):
"""simple docstring"""
        generated_words = self.translate_src_text(**snake_case)
assert self.expected_text == generated_words
def UpperCamelCase__ ( self :str, **snake_case :Tuple):
"""simple docstring"""
        model_inputs = self.tokenizer(self.src_text, **snake_case, padding=True, return_tensors='tf')
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
return generated_words
@slow
def UpperCamelCase__ ( self :Tuple):
"""simple docstring"""
self._assert_generated_batch_equal_expected()
| 181 | 1 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(function: str, starting_point: complex, variable: str = "x", precision: float = 10**-10, multiplicity: int = 1, ) -> complex:
    symbol = symbols(variable)
    func = lambdify(symbol, function)
    diff_function = lambdify(symbol, diff(function, variable))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess)
        else:
            raise ZeroDivisionError('Could not find root') from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}')
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
f'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
f'{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}',
)
# Find root of cos(x)
print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 707 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None
def __init__( self , a__=None , a__=None , a__=None , a__="<unk>" , a__="<s>" , a__="</s>" , a__="<pad>" , a__=False , a__=False , **a__ , ):
super().__init__(
a__ , a__ , tokenizer_file=a__ , unk_token=a__ , bos_token=a__ , eos_token=a__ , pad_token=a__ , add_prefix_space=a__ , clean_up_tokenization_spaces=a__ , **a__ , )
_lowerCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , a__ ) != add_prefix_space:
_lowerCamelCase = getattr(a__ , pre_tok_state.pop('type' ) )
_lowerCamelCase = add_prefix_space
_lowerCamelCase = pre_tok_class(**a__ )
_lowerCamelCase = add_prefix_space
def snake_case_ ( self , *a__ , **a__ ):
_lowerCamelCase = kwargs.get('is_split_into_words' , a__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
' pretokenized inputs.' )
return super()._batch_encode_plus(*a__ , **a__ )
def snake_case_ ( self , *a__ , **a__ ):
_lowerCamelCase = kwargs.get('is_split_into_words' , a__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
' pretokenized inputs.' )
return super()._encode_plus(*a__ , **a__ )
def snake_case_ ( self , a__ , a__ = None ):
_lowerCamelCase = self._tokenizer.model.save(a__ , name=a__ )
return tuple(a__ )
def snake_case_ ( self , a__ ):
_lowerCamelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a__ , add_special_tokens=a__ ) + [self.eos_token_id] )
if len(a__ ) > self.model_max_length:
_lowerCamelCase = input_ids[-self.model_max_length :]
return input_ids
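# Minimal usage sketch (checkpoint name taken from the pretrained map above; the exact token ids
# are not asserted here):
#   tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#   tok("Hello world")["input_ids"]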
| 222 | 0 |
'''simple docstring'''
def print_pascal_triangle(num_rows: int) -> None:
    '''simple docstring'''
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=' ')
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=' ')
            else:
                print(triangle[row_idx][col_idx], end='')
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    '''simple docstring'''
    if not isinstance(num_rows, int):
        raise TypeError('The input value of \'num_rows\' should be \'int\'')
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0')
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    '''simple docstring'''
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int, ) -> None:
    '''simple docstring'''
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def __lowerCAmelCase ( a_ ) -> list[list[int]]:
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
SCREAMING_SNAKE_CASE : list[list[int]] = [[1]]
for row_index in range(1 , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE : str = [0] + result[-1] + [0]
SCREAMING_SNAKE_CASE : Optional[int] = row_index + 1
# Calculate the number of distinct elements in a row
SCREAMING_SNAKE_CASE : str = sum(divmod(SCREAMING_SNAKE_CASE__ , 2 ) )
SCREAMING_SNAKE_CASE : Any = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
SCREAMING_SNAKE_CASE : Optional[int] = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
SCREAMING_SNAKE_CASE : List[str] = row_first_half + row_second_half
result.append(SCREAMING_SNAKE_CASE__ )
return result
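# Quick sanity example: both builders should produce the same rows, e.g.
#   generate_pascal_triangle(3) == generate_pascal_triangle_optimized(3) == [[1], [1, 1], [1, 2, 1]]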
def __lowerCAmelCase ( ) -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(a_ , a_ ) -> None:
SCREAMING_SNAKE_CASE : Optional[Any] = f"""{func.__name__}({value})"""
SCREAMING_SNAKE_CASE : Dict = timeit(f"""__main__.{call}""" , setup='import __main__' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"""{call:38} -- {timing:.4f} seconds""" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 251 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 336 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 717 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 453 | 0 |
def selection_sort(collection):
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
return collection
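# Example: selection_sort([64, 25, 12, 22, 11]) returns [11, 12, 22, 25, 64] (the list is sorted in place and returned).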
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted)) | 141 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)
        mask_token = AddedToken('<mask>', lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : str , UpperCAmelCase : Any ) -> Union[str, Any]:
lowerCAmelCase :Tuple = 'this is a test'
lowerCAmelCase :Union[str, Any] = 'this is a test'
return input_text, output_text
def UpperCAmelCase__ ( self : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any=False , UpperCAmelCase : List[Any]=20 , UpperCAmelCase : str=5 ) -> Optional[Any]:
lowerCAmelCase , lowerCAmelCase :Dict = self.get_input_output_texts(UpperCAmelCase )
lowerCAmelCase :str = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCAmelCase :Union[str, Any] = tokenizer.decode(UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase )
return text, ids
def UpperCAmelCase__ ( self : Any ) -> Optional[Any]:
lowerCAmelCase :List[Any] = '<pad>'
lowerCAmelCase :Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase )
def UpperCAmelCase__ ( self : Any ) -> Any:
lowerCAmelCase :Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-4] , 'œ' )
self.assertEqual(vocab_keys[-2] , '<mask>' )
self.assertEqual(vocab_keys[-1] , '<ctc_blank>' )
self.assertEqual(len(UpperCAmelCase ) , 81 )
def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def UpperCAmelCase__ ( self : Any ) -> List[str]:
lowerCAmelCase :Any = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase :Optional[int] = tokenizer.vocab_size
lowerCAmelCase :Union[str, Any] = len(UpperCAmelCase )
self.assertNotEqual(UpperCAmelCase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
lowerCAmelCase :Optional[Any] = ['aaaaa bbbbbb', 'cccccccccdddddddd']
lowerCAmelCase :Union[str, Any] = tokenizer.add_tokens(UpperCAmelCase )
lowerCAmelCase :str = tokenizer.vocab_size
lowerCAmelCase :List[Any] = len(UpperCAmelCase )
self.assertNotEqual(UpperCAmelCase , 0 )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , len(UpperCAmelCase ) )
self.assertEqual(UpperCAmelCase , all_size + len(UpperCAmelCase ) )
lowerCAmelCase :int = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=UpperCAmelCase )
self.assertGreaterEqual(len(UpperCAmelCase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
lowerCAmelCase :List[Any] = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
lowerCAmelCase :str = tokenizer.add_special_tokens(UpperCAmelCase )
lowerCAmelCase :Optional[Any] = tokenizer.vocab_size
lowerCAmelCase :Tuple = len(UpperCAmelCase )
self.assertNotEqual(UpperCAmelCase , 0 )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , len(UpperCAmelCase ) )
self.assertEqual(UpperCAmelCase , all_size_a + len(UpperCAmelCase ) )
lowerCAmelCase :List[str] = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=UpperCAmelCase )
self.assertGreaterEqual(len(UpperCAmelCase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
pass
def UpperCAmelCase__ ( self : str ) -> int:
pass
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
lowerCAmelCase :Optional[int] = self.get_tokenizer()
lowerCAmelCase :List[str] = tokenizer.tokenize('This is a test' )
# fmt: off
self.assertListEqual(UpperCAmelCase , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
lowerCAmelCase :List[str] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
lowerCAmelCase :Optional[int] = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
# fmt: off
self.assertListEqual(UpperCAmelCase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
lowerCAmelCase :int = tokenizer.convert_ids_to_tokens(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
@slow
def UpperCAmelCase__ ( self : str ) -> Tuple:
# Use custom sequence because this tokenizer does not handle numbers.
lowerCAmelCase :Any = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
lowerCAmelCase :List[str] = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=UpperCAmelCase , ) | 553 | 0 |
def sylvester(number: int) -> int:
    assert isinstance(number, int), f'The input value of [n={number}] is not an integer'
    if number == 1:
        return 2
    elif number < 1:
        msg = f'The input value of [n={number}] has to be > 0'
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
return lower * upper + 1
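# First few terms for reference: sylvester(1..5) -> 2, 3, 7, 43, 1807; each term equals prev**2 - prev + 1.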
if __name__ == "__main__":
print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
| 714 |
class EditDistance:
    def __init__(self):
        self.word1 = ''
        self.word2 = ''
        self.dp = []
    def __min_dist_top_down_dp(self, m, n):
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]
    def min_dist_top_down(self, word1, word2):
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)
    def min_dist_bottom_up(self, word1, word2):
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
return self.dp[m][n]
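# Example: EditDistance().min_dist_bottom_up("intention", "execution") == 5
# (delete 'i', substitute 'n'->'e', substitute 't'->'x', insert 'c', substitute 'n'->'u').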
if __name__ == "__main__":
lowerCAmelCase__: str = EditDistance()
print("****************** Testing Edit Distance DP Algorithm ******************")
print()
lowerCAmelCase__: Optional[Any] = input("Enter the first string: ").strip()
lowerCAmelCase__: Any = input("Enter the second string: ").strip()
print()
print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
print()
print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 311 | 0 |
from typing import Any
def viterbi(observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ):
    _validation(
        observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, )
# Creates data structures and fill initial step
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
SCREAMING_SNAKE_CASE : Optional[int] = {}
for state in states_space:
SCREAMING_SNAKE_CASE : str = observations_space[0]
SCREAMING_SNAKE_CASE : Tuple = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
SCREAMING_SNAKE_CASE : Optional[int] = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(__lowerCAmelCase)):
SCREAMING_SNAKE_CASE : Any = observations_space[o]
SCREAMING_SNAKE_CASE : Optional[Any] = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
SCREAMING_SNAKE_CASE : Any = ""
SCREAMING_SNAKE_CASE : List[Any] = -1
for k_state in states_space:
SCREAMING_SNAKE_CASE : List[str] = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
SCREAMING_SNAKE_CASE : int = probability
SCREAMING_SNAKE_CASE : List[str] = k_state
# Update probabilities and pointers dicts
SCREAMING_SNAKE_CASE : Any = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
SCREAMING_SNAKE_CASE : Tuple = arg_max
# The final observation
SCREAMING_SNAKE_CASE : int = observations_space[len(__lowerCAmelCase) - 1]
# argmax for given final observation
SCREAMING_SNAKE_CASE : Union[str, Any] = ""
SCREAMING_SNAKE_CASE : List[str] = -1
for k_state in states_space:
SCREAMING_SNAKE_CASE : List[str] = probabilities[(k_state, final_observation)]
if probability > max_probability:
SCREAMING_SNAKE_CASE : List[Any] = probability
SCREAMING_SNAKE_CASE : Tuple = k_state
SCREAMING_SNAKE_CASE : Any = arg_max
# Process pointers backwards
SCREAMING_SNAKE_CASE : Tuple = last_state
SCREAMING_SNAKE_CASE : Optional[int] = []
for o in range(len(__lowerCAmelCase) - 1 , -1 , -1):
result.append(__lowerCAmelCase)
SCREAMING_SNAKE_CASE : Optional[int] = pointers[previous, observations_space[o]]
result.reverse()
return result
def lowerCamelCase__ ( _a , _a , _a , _a , _a , ):
_validate_not_empty(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
_validate_lists(__lowerCAmelCase , __lowerCAmelCase)
_validate_dicts(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
def lowerCamelCase__ ( _a , _a , _a , _a , _a , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
]):
raise ValueError("There's an empty parameter")
def lowerCamelCase__ ( _a , _a):
_validate_list(__lowerCAmelCase , "observations_space")
_validate_list(__lowerCAmelCase , "states_space")
def lowerCamelCase__ ( _a , _a):
if not isinstance(_object , __lowerCAmelCase):
SCREAMING_SNAKE_CASE : Union[str, Any] = f"{var_name} must be a list"
raise ValueError(__lowerCAmelCase)
else:
for x in _object:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase):
SCREAMING_SNAKE_CASE : Optional[Any] = f"{var_name} must be a list of strings"
raise ValueError(__lowerCAmelCase)
def lowerCamelCase__ ( _a , _a , _a , ):
_validate_dict(__lowerCAmelCase , "initial_probabilities" , __lowerCAmelCase)
_validate_nested_dict(__lowerCAmelCase , "transition_probabilities")
_validate_nested_dict(__lowerCAmelCase , "emission_probabilities")
def lowerCamelCase__ ( _a , _a):
_validate_dict(_object , __lowerCAmelCase , __lowerCAmelCase)
for x in _object.values():
_validate_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
def lowerCamelCase__ ( _a , _a , _a , _a = False):
if not isinstance(_object , __lowerCAmelCase):
SCREAMING_SNAKE_CASE : Union[str, Any] = f"{var_name} must be a dict"
raise ValueError(__lowerCAmelCase)
if not all(isinstance(__lowerCAmelCase , __lowerCAmelCase) for x in _object):
SCREAMING_SNAKE_CASE : List[Any] = f"{var_name} all keys must be strings"
raise ValueError(__lowerCAmelCase)
if not all(isinstance(__lowerCAmelCase , __lowerCAmelCase) for x in _object.values()):
SCREAMING_SNAKE_CASE : Union[str, Any] = "nested dictionary " if nested else ""
SCREAMING_SNAKE_CASE : int = f"{var_name} {nested_text}all values must be {value_type.__name__}"
raise ValueError(__lowerCAmelCase)
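# Classic toy HMM for reference (the standard "Healthy/Fever" example; the concrete values below are
# illustrative and not taken from this module):
#   observations = ["normal", "cold", "dizzy"]
#   states = ["Healthy", "Fever"]
#   start_p = {"Healthy": 0.6, "Fever": 0.4}
#   trans_p = {"Healthy": {"Healthy": 0.7, "Fever": 0.3}, "Fever": {"Healthy": 0.4, "Fever": 0.6}}
#   emit_p = {"Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#             "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
#   viterbi(observations, states, start_p, trans_p, emit_p) -> ["Healthy", "Healthy", "Fever"]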
if __name__ == "__main__":
from doctest import testmod
testmod() | 25 |
'''simple docstring'''
from __future__ import annotations
_SCREAMING_SNAKE_CASE = 1.6021E-19 # units = C
def carrier_concentration(conductivity: float, electron_conc: float, mobility: float, ) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
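# Worked example: conductivity = q * n * mobility, so
#   carrier_concentration(conductivity=0, electron_conc=25, mobility=100)
#   returns ("conductivity", 25 * 100 * 1.6021e-19), i.e. the quantity passed as 0 is the one solved for.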
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''],
'''tokenization_deberta''': ['''DebertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
'''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DebertaForMaskedLM''',
'''DebertaForQuestionAnswering''',
'''DebertaForSequenceClassification''',
'''DebertaForTokenClassification''',
'''DebertaModel''',
'''DebertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
'''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDebertaForMaskedLM''',
'''TFDebertaForQuestionAnswering''',
'''TFDebertaForSequenceClassification''',
'''TFDebertaForTokenClassification''',
'''TFDebertaModel''',
'''TFDebertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 685 |
def check_bouncy(n: int) -> bool:
    """simple docstring"""
    if not isinstance(n, int):
        raise ValueError('check_bouncy() accepts only integer arguments')
    str_n = str(n)
    sorted_str_n = ''.join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def solution(percent: float = 99) -> int:
    """simple docstring"""
    if not 0 < percent < 100:
        raise ValueError('solution() only accepts values from 0 to 100')
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
bouncy_num += 1
if (bouncy_num / num) * 1_00 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
| 685 | 1 |
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string).")
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
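# Worked example: bwt_transform("banana") == {"bwt_string": "nnbaaa", "idx_original_string": 3},
# and reverse_bwt("nnbaaa", 3) == "banana".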
if __name__ == "__main__":
__lowerCamelCase : int = """Provide a string that I will generate its BWT transform: """
__lowerCamelCase : Optional[Any] = input(entry_msg).strip()
__lowerCamelCase : List[str] = bwt_transform(s)
print(
f"""Burrows Wheeler transform for string '{s}' results """
f"""in '{result['bwt_string']}'"""
)
__lowerCamelCase : Optional[int] = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
f"""Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' """
f"""we get original string '{original_string}'"""
)
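    # Illustrative sanity check: reversing the BWT with the stored index must recover
    # the exact input string.
    assert original_string == s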
| 629 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ['input_features', 'attention_mask']
    def __init__(self, feature_size=80, sampling_rate=16000, num_mel_bins=80, padding_value=0.0, do_ceptral_normalize=True, normalize_means=True, normalize_vars=True, **kwargs):
        '''simple docstring'''
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
def __UpperCamelCase( self , A_ , ):
'''simple docstring'''
UpperCamelCase : Optional[int] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
UpperCamelCase : Optional[Any] = torch.from_numpy(A_ ).unsqueeze(0 )
UpperCamelCase : Dict = ta_kaldi.fbank(A_ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def __UpperCamelCase( A_ , A_ , A_ = True , A_ = True , A_ = 0.0 , ):
'''simple docstring'''
if normalize_means:
UpperCamelCase : str = x[:input_length].mean(axis=0 )
UpperCamelCase : Any = np.subtract(A_ , A_ )
if normalize_vars:
UpperCamelCase : List[str] = x[:input_length].std(axis=0 )
UpperCamelCase : Optional[int] = np.divide(A_ , A_ )
if input_length < x.shape[0]:
UpperCamelCase : Dict = padding_value
# make sure array is in float32
UpperCamelCase : Union[str, Any] = x.astype(np.floataa )
return x
def __UpperCamelCase( self , A_ , A_ = None ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(A_ , A_ , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(A_ , A_ )
]
def __call__( self , A_ , A_ = False , A_ = None , A_ = False , A_ = None , A_ = None , A_ = None , A_ = None , **A_ , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
F""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
F""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
UpperCamelCase : str = isinstance(A_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
UpperCamelCase : List[Any] = is_batched_numpy or (
isinstance(A_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase : List[str] = [np.asarray(A_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(A_ , np.ndarray ):
UpperCamelCase : int = np.asarray(A_ , dtype=np.floataa )
elif isinstance(A_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCamelCase : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase : Optional[int] = [raw_speech]
# extract fbank features
UpperCamelCase : Optional[int] = [self._extract_fbank_features(A_ ) for waveform in raw_speech]
# convert into correct format for padding
UpperCamelCase : List[Any] = BatchFeature({"input_features": features} )
UpperCamelCase : Any = self.pad(
A_ , padding=A_ , max_length=A_ , truncation=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , **A_ , )
# make sure list is in array format
UpperCamelCase : Optional[int] = padded_inputs.get("input_features" )
if isinstance(input_features[0] , A_ ):
UpperCamelCase : str = [np.asarray(A_ , dtype=np.floataa ) for feature in input_features]
UpperCamelCase : Optional[int] = padded_inputs.get("attention_mask" )
if attention_mask is not None:
UpperCamelCase : Optional[int] = [np.asarray(A_ , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
UpperCamelCase : List[Any] = (
np.array(A_ , dtype=np.intaa )
if self._get_padding_strategies(A_ , max_length=A_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
UpperCamelCase : int = self.normalize(
padded_inputs["input_features"] , attention_mask=A_ )
if return_tensors is not None:
UpperCamelCase : List[str] = padded_inputs.convert_to_tensors(A_ )
return padded_inputs
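# Illustrative sketch only: the utterance-level CMVN applied above boils down to
# per-dimension mean/variance normalization of the fbank features over time.
# `_cmvn_sketch` is a hypothetical helper, not part of the extractor's API.
def _cmvn_sketch(features: np.ndarray) -> np.ndarray:
    features = np.asarray(features, dtype=np.float32)  # shape: (num_frames, num_mel_bins)
    return (features - features.mean(axis=0)) / (features.std(axis=0) + 1e-10)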
| 629 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_falcon'] = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
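# Rough intuition for the lazy pattern above (hypothetical sketch, not the actual
# `_LazyModule` implementation): heavy submodules are only imported when one of their
# attributes is first requested, e.g. via PEP 562 module-level __getattr__:
#
#     import importlib
#     def __getattr__(name):
#         module = importlib.import_module(".modeling_falcon", __name__)
#         return getattr(module, name)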
| 720 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : List[Any] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """simple docstring"""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
"""simple docstring"""
A__ : Tuple =ViTConfig()
# patch_size
if model_name[-1] == "8":
A__ : Optional[Any] =8
# set labels if required
if not base_model:
A__ : Optional[Any] =1_000
A__ : str ="""huggingface/label-files"""
A__ : Any ="""imagenet-1k-id2label.json"""
A__ : Tuple =json.load(open(hf_hub_download(__snake_case, __snake_case, repo_type="""dataset""" ), """r""" ) )
A__ : List[str] ={int(__snake_case ): v for k, v in idalabel.items()}
A__ : List[Any] =idalabel
A__ : List[Any] ={v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
A__ : str =384
A__ : Optional[Any] =1_536
A__ : Optional[Any] =12
A__ : Union[str, Any] =6
# load original model from torch hub
A__ : List[Any] =torch.hub.load("""facebookresearch/dino:main""", __snake_case )
original_model.eval()
# load state_dict of original model, remove and rename some keys
A__ : List[str] =original_model.state_dict()
if base_model:
remove_classification_head_(__snake_case )
A__ : Union[str, Any] =create_rename_keys(__snake_case, base_model=__snake_case )
for src, dest in rename_keys:
rename_key(__snake_case, __snake_case, __snake_case )
read_in_q_k_v(__snake_case, __snake_case, __snake_case )
# load HuggingFace model
if base_model:
A__ : List[str] =ViTModel(__snake_case, add_pooling_layer=__snake_case ).eval()
else:
A__ : List[str] =ViTForImageClassification(__snake_case ).eval()
model.load_state_dict(__snake_case )
# Check outputs on an image, prepared by ViTImageProcessor
A__ : Union[str, Any] =ViTImageProcessor()
A__ : Optional[int] =image_processor(images=prepare_img(), return_tensors="""pt""" )
A__ : Union[str, Any] =encoding["""pixel_values"""]
A__ : Union[str, Any] =model(__snake_case )
if base_model:
A__ : List[str] =original_model(__snake_case )
assert torch.allclose(__snake_case, outputs.last_hidden_state[:, 0, :], atol=1E-1 )
else:
A__ : Optional[int] =original_model(__snake_case )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__snake_case, outputs.logits, atol=1E-3 )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__snake_case )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
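# Illustrative follow-up (the path is an example, not fixed by the script): a converted
# dump can be reloaded afterwards with the standard transformers API.
#
#     from transformers import ViTImageProcessor, ViTModel
#     model = ViTModel.from_pretrained("./dino_vitb16")
#     image_processor = ViTImageProcessor.from_pretrained("./dino_vitb16")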
| 687 | 0 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    '''simple docstring'''
    name: str
    pip_package: str = None
    @staticmethod
    def is_available():
        """simple docstring"""
        raise NotImplementedError
    def run(self, trainer, n_trials, direction, **kwargs):
        """simple docstring"""
        raise NotImplementedError
    def default_hp_space(self, trial):
        """simple docstring"""
        raise NotImplementedError
    def ensure_available(self):
        """simple docstring"""
        if not self.is_available():
            raise RuntimeError(
                f"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )
    @classmethod
    def pip_install(cls):
        """simple docstring"""
        return f"""`pip install {cls.pip_package or cls.name}`"""
class OptunaBackend(HyperParamSearchBackendBase):
    '''simple docstring'''
    name = "optuna"
    @staticmethod
    def is_available():
        """simple docstring"""
        return is_optuna_available()
    def run(self, trainer, n_trials, direction, **kwargs):
        """simple docstring"""
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)
    def default_hp_space(self, trial):
        """simple docstring"""
        return default_hp_space_optuna(trial)
class RayTuneBackend(HyperParamSearchBackendBase):
    '''simple docstring'''
    name = "ray"
    pip_package = "'ray[tune]'"
    @staticmethod
    def is_available():
        """simple docstring"""
        return is_ray_available()
    def run(self, trainer, n_trials, direction, **kwargs):
        """simple docstring"""
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)
    def default_hp_space(self, trial):
        """simple docstring"""
        return default_hp_space_ray(trial)
class SigOptBackend(HyperParamSearchBackendBase):
    '''simple docstring'''
    name = "sigopt"
    @staticmethod
    def is_available():
        """simple docstring"""
        return is_sigopt_available()
    def run(self, trainer, n_trials, direction, **kwargs):
        """simple docstring"""
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)
    def default_hp_space(self, trial):
        """simple docstring"""
        return default_hp_space_sigopt(trial)
class WandbBackend(HyperParamSearchBackendBase):
    '''simple docstring'''
    name = "wandb"
    @staticmethod
    def is_available():
        """simple docstring"""
        return is_wandb_available()
    def run(self, trainer, n_trials, direction, **kwargs):
        """simple docstring"""
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)
    def default_hp_space(self, trial):
        """simple docstring"""
        return default_hp_space_wandb(trial)
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend():
    '''simple docstring'''
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"""{len(available_backends)} hyperparameter search backends available. Using {name} as the default.""" )
        return name
    raise RuntimeError(
        '''No hyperparameter search backend available.\n'''
        + '''\n'''.join(
            f""" - To install {backend.name} run {backend.pip_install()}"""
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
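# Illustrative usage sketch (`_pick_backend_sketch` is a hypothetical helper, not part
# of this module): resolve the default backend name, look up its class, and make sure
# the corresponding package is installed before launching a search.
def _pick_backend_sketch():
    backend_name = default_hp_search_backend()  # e.g. "optuna" when optuna is installed
    backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(backend_name)]()
    backend.ensure_available()
    return backend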
| 254 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    '''simple docstring'''
    def __init__(self, df: pyspark.sql.DataFrame, split: Optional[NamedSplit] = None, features: Optional[Features] = None, streaming: bool = True, cache_dir: str = None, keep_in_memory: bool = False, working_dir: str = None, load_from_cache_file: bool = True, file_format: str = "arrow", **kwargs):
        """simple docstring"""
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs)
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)
    def read(self):
        """simple docstring"""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
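# Illustrative usage sketch (`_spark_reader_sketch` is a hypothetical helper): turn an
# existing Spark DataFrame into a `datasets` dataset; with streaming=False the data is
# materialized to the cache before being returned.
def _spark_reader_sketch(df: pyspark.sql.DataFrame):
    return SparkDatasetReader(df, streaming=False).read()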
| 254 | 1 |
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int):
        """simple docstring"""
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int):
        """simple docstring"""
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = []
for i in range(len(self.finish_queue)):
sequence.append(self.finish_queue[i].process_name)
return sequence
def lowerCamelCase ( self : Any , _snake_case : list[Process]):
"""simple docstring"""
UpperCAmelCase_ = []
for i in range(len(_snake_case)):
waiting_times.append(queue[i].waiting_time)
return waiting_times
def lowerCamelCase ( self : Dict , _snake_case : list[Process]):
"""simple docstring"""
UpperCAmelCase_ = []
for i in range(len(_snake_case)):
turnaround_times.append(queue[i].turnaround_time)
return turnaround_times
def lowerCamelCase ( self : Optional[int] , _snake_case : list[Process]):
"""simple docstring"""
UpperCAmelCase_ = []
for i in range(len(_snake_case)):
completion_times.append(queue[i].stop_time)
return completion_times
def lowerCamelCase ( self : Tuple , _snake_case : deque[Process]):
"""simple docstring"""
return [q.burst_time for q in queue]
def lowerCamelCase ( self : Any , _snake_case : Process):
"""simple docstring"""
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def lowerCamelCase ( self : int , _snake_case : deque[Process]):
"""simple docstring"""
UpperCAmelCase_ = deque() # sequence deque of finished process
while len(_snake_case) != 0:
UpperCAmelCase_ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_snake_case)
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
UpperCAmelCase_ = 0
# set the process's turnaround time because it is finished
UpperCAmelCase_ = self.current_time - cp.arrival_time
# set the completion time
UpperCAmelCase_ = self.current_time
# add the process to queue that has finished queue
finished.append(_snake_case)
self.finish_queue.extend(_snake_case) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def lowerCamelCase ( self : Optional[Any] , _snake_case : deque[Process] , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_snake_case)):
UpperCAmelCase_ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_snake_case)
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
UpperCAmelCase_ = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_snake_case)
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
UpperCAmelCase_ = 0
# set the finish time
UpperCAmelCase_ = self.current_time
# update the process' turnaround time because it is finished
UpperCAmelCase_ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_snake_case)
self.finish_queue.extend(_snake_case) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
for i in range(self.number_of_queues - 1):
UpperCAmelCase_ , UpperCAmelCase_ = self.round_robin(
self.ready_queue , self.time_slices[i])
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue)
return self.finish_queue
if __name__ == "__main__":
import doctest
snake_case_ : str = Process("P1", 0, 53)
snake_case_ : Tuple = Process("P2", 0, 17)
snake_case_ : List[str] = Process("P3", 0, 68)
snake_case_ : str = Process("P4", 0, 24)
snake_case_ : Dict = 3
snake_case_ : Tuple = [17, 25]
snake_case_ : Union[str, Any] = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
snake_case_ : str = Process("P1", 0, 53)
snake_case_ : Union[str, Any] = Process("P2", 0, 17)
snake_case_ : List[Any] = Process("P3", 0, 68)
snake_case_ : List[str] = Process("P4", 0, 24)
snake_case_ : int = 3
snake_case_ : List[Any] = [17, 25]
snake_case_ : int = deque([Pa, Pa, Pa, Pa])
snake_case_ : Dict = MLFQ(number_of_queues, time_slices, queue, 0)
snake_case_ : List[str] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print sequence of finished processes
print(
f"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
)
| 169 |
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """simple docstring"""
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    """simple docstring"""
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def A (__A : Dict , __A : str ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = torch.load(__A , map_location='''cpu''' )
UpperCAmelCase_ = mam_aaa['''args''']
UpperCAmelCase_ = mam_aaa['''model''']
UpperCAmelCase_ = state_dict['''decoder.output_projection.weight''']
remove_ignore_keys_(__A )
rename_keys(__A )
UpperCAmelCase_ = state_dict['''decoder.embed_tokens.weight'''].shape[0]
UpperCAmelCase_ = args.share_decoder_input_output_embed
UpperCAmelCase_ = [int(__A ) for i in args.conv_kernel_sizes.split(''',''' )]
UpperCAmelCase_ = SpeechaTextConfig(
vocab_size=__A , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , num_conv_layers=len(__A ) , conv_channels=args.conv_channels , conv_kernel_sizes=__A , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=__A , num_beams=5 , max_length=200 , use_cache=__A , decoder_start_token_id=2 , early_stopping=__A , )
UpperCAmelCase_ = SpeechaTextForConditionalGeneration(__A )
UpperCAmelCase_ , UpperCAmelCase_ = model.model.load_state_dict(__A , strict=__A )
if len(__A ) > 0 and not set(__A ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
F""" but all the following weights are missing {missing}""" )
if tie_embeds:
UpperCAmelCase_ = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
UpperCAmelCase_ = lm_head_weights
model.save_pretrained(__A )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 169 | 1 |
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input: str, model, tokenizer, topk: int = 5) -> list:
    """simple docstring"""
    assert masked_input.count('<mask>') == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = ' '.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))])
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ')):
        predicted_token = predicted_token_bpe.replace('\u2581', ' ')
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(' {0}'.format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                ) )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                ) )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained('camembert-base')
model = CamembertForMaskedLM.from_pretrained('camembert-base')
model.eval()
masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
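# Illustrative: each returned item is a (filled_sentence, probability, token) tuple,
# so the best candidate can be unpacked directly.
best_fill, best_prob, best_token = fill_mask(masked_input, model, tokenizer, topk=3)[0]
print(f"Top fill: {best_fill} (p={best_prob:.3f})")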
| 51 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class snake_case :
"""simple docstring"""
def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> Tuple:
"""simple docstring"""
pass
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
pass
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ) -> List[Any]:
"""simple docstring"""
snake_case__ : Dict = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase , lowerCamelCase )
snake_case__ : int = TFVisionTextDualEncoderModel(lowerCamelCase )
snake_case__ : List[str] = model(input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ ,snake_case__ : List[Any] = self.get_vision_text_model(lowerCamelCase , lowerCamelCase )
snake_case__ : Dict = TFVisionTextDualEncoderModel(vision_model=lowerCamelCase , text_model=lowerCamelCase )
snake_case__ : List[Any] = model(input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
snake_case__ ,snake_case__ : Tuple = self.get_vision_text_model(lowerCamelCase , lowerCamelCase )
snake_case__ : int = {'''vision_model''': vision_model, '''text_model''': text_model}
snake_case__ : str = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
snake_case__ : Optional[int] = model(input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ) -> Tuple:
"""simple docstring"""
snake_case__ ,snake_case__ : List[Any] = self.get_vision_text_model(lowerCamelCase , lowerCamelCase )
snake_case__ : Tuple = TFVisionTextDualEncoderModel(vision_model=lowerCamelCase , text_model=lowerCamelCase )
snake_case__ : Tuple = model(input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase )
snake_case__ : Dict = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase )
snake_case__ : List[Any] = TFVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
snake_case__ : List[str] = model(input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase )
snake_case__ : Tuple = after_output[0].numpy()
snake_case__ : Optional[int] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase , 1E-5 )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case__ ,snake_case__ : Optional[int] = self.get_vision_text_model(lowerCamelCase , lowerCamelCase )
snake_case__ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=lowerCamelCase , text_model=lowerCamelCase )
snake_case__ : List[Any] = model(
input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase , output_attentions=lowerCamelCase )
snake_case__ : int = output.vision_model_output.attentions
self.assertEqual(len(lowerCamelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case__ : Optional[Any] = to_atuple(vision_model.config.image_size )
snake_case__ : List[str] = to_atuple(vision_model.config.patch_size )
snake_case__ : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
snake_case__ : Dict = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
snake_case__ : Dict = output.text_model_output.attentions
self.assertEqual(len(lowerCamelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int:
"""simple docstring"""
snake_case__ : List[str] = np.abs((a - b) ).max()
self.assertLessEqual(lowerCamelCase , lowerCamelCase , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : str = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**lowerCamelCase )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Optional[Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCamelCase )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : Union[str, Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ : List[str] = self.prepare_config_and_inputs()
self.check_save_load(**lowerCamelCase )
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : Any = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCamelCase )
@slow
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ ,snake_case__ : Tuple = self.get_pretrained_model_and_inputs()
snake_case__ : int = model_a(**lowerCamelCase )
snake_case__ : Tuple = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCamelCase )
snake_case__ : List[Any] = TFVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
snake_case__ : Optional[int] = model_a(**lowerCamelCase )
snake_case__ : Dict = after_outputs[0].numpy()
snake_case__ : Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase , 1E-5 )
@require_tf
class snake_case ( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : str = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-random-bert''' )
snake_case__ : Union[str, Any] = 13
snake_case__ : Any = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
snake_case__ : str = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
snake_case__ : int = random_attention_mask([batch_size, 4] )
snake_case__ : List[Any] = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Union[str, Any] = TFViTModel(lowerCamelCase , name='''vision_model''' )
snake_case__ : Optional[int] = TFBertModel(lowerCamelCase , name='''text_model''' )
return vision_model, text_model
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : List[Any] = TFViTModelTester(self )
snake_case__ : Optional[Any] = TFBertModelTester(self )
snake_case__ : Optional[Any] = vit_model_tester.prepare_config_and_inputs()
snake_case__ : Optional[int] = bert_model_tester.prepare_config_and_inputs()
snake_case__ ,snake_case__ ,snake_case__ : Union[str, Any] = vision_config_and_inputs
(
(
snake_case__
) ,(
snake_case__
) ,(
snake_case__
) ,(
snake_case__
) ,(
snake_case__
) ,(
snake_case__
) ,(
snake_case__
) ,
) : str = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class snake_case ( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-deit-tf''' , '''hf-internal-testing/tiny-random-roberta''' )
snake_case__ : Optional[Any] = 13
snake_case__ : int = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
snake_case__ : str = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
snake_case__ : Union[str, Any] = random_attention_mask([batch_size, 4] )
snake_case__ : Tuple = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case__ ,snake_case__ : Optional[int] = self.get_vision_text_model(lowerCamelCase , lowerCamelCase )
snake_case__ : str = TFVisionTextDualEncoderModel(vision_model=lowerCamelCase , text_model=lowerCamelCase )
snake_case__ : List[Any] = model(
input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase , output_attentions=lowerCamelCase )
snake_case__ : List[Any] = output.vision_model_output.attentions
self.assertEqual(len(lowerCamelCase ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
snake_case__ : Any = to_atuple(vision_model.config.image_size )
snake_case__ : List[str] = to_atuple(vision_model.config.patch_size )
snake_case__ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
snake_case__ : Optional[Any] = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
snake_case__ : Optional[Any] = output.text_model_output.attentions
self.assertEqual(len(lowerCamelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> Any:
"""simple docstring"""
snake_case__ : Optional[Any] = TFDeiTModel(lowerCamelCase , name='''vision_model''' )
snake_case__ : Optional[Any] = TFRobertaModel(lowerCamelCase , name='''text_model''' )
return vision_model, text_model
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : Dict = TFDeiTModelTester(self )
snake_case__ : Any = TFRobertaModelTester(self )
snake_case__ : int = vit_model_tester.prepare_config_and_inputs()
snake_case__ : List[str] = bert_model_tester.prepare_config_and_inputs()
snake_case__ ,snake_case__ ,snake_case__ : Union[str, Any] = vision_config_and_inputs
(
(
snake_case__
) ,(
snake_case__
) ,(
snake_case__
) ,(
snake_case__
) ,(
snake_case__
) ,(
snake_case__
) ,(
snake_case__
) ,
) : int = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class snake_case ( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : List[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-clip-tf''' , '''hf-internal-testing/tiny-random-bert''' )
snake_case__ : Union[str, Any] = 13
snake_case__ : Any = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
snake_case__ : Union[str, Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
snake_case__ : Dict = random_attention_mask([batch_size, 4] )
snake_case__ : Tuple = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> Dict:
"""simple docstring"""
snake_case__ : List[Any] = TFCLIPVisionModel(lowerCamelCase , name='''vision_model''' )
snake_case__ : List[Any] = TFBertModel(lowerCamelCase , name='''text_model''' )
return vision_model, text_model
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : int = TFCLIPVisionModelTester(self )
snake_case__ : str = TFBertModelTester(self )
snake_case__ : Tuple = clip_model_tester.prepare_config_and_inputs()
snake_case__ : Optional[int] = bert_model_tester.prepare_config_and_inputs()
snake_case__ ,snake_case__ : Dict = vision_config_and_inputs
(
(
snake_case__
) ,(
snake_case__
) ,(
snake_case__
) ,(
snake_case__
) ,(
snake_case__
) ,(
snake_case__
) ,(
snake_case__
) ,
) : List[str] = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Optional[int] = TFVisionTextDualEncoderModel.from_pretrained(
'''clip-italian/clip-italian''' , logit_scale_init_value=1.0 , from_pt=lowerCamelCase )
snake_case__ : List[Any] = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
snake_case__ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
snake_case__ : Optional[Any] = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=lowerCamelCase , padding=lowerCamelCase , return_tensors='''np''' )
snake_case__ : Optional[int] = model(**lowerCamelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
snake_case__ : int = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , lowerCamelCase , atol=1E-3 ) )
| 261 | 0 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCAmelCase_ ( snake_case__ ):
"""simple docstring"""
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : NestedDataStructureLike[PathLike] , SCREAMING_SNAKE_CASE__ : Optional[NamedSplit] = None , SCREAMING_SNAKE_CASE__ : Optional[Features] = None , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[str] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , **SCREAMING_SNAKE_CASE__ : Optional[int] , ):
'''simple docstring'''
super().__init__(
SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ , streaming=SCREAMING_SNAKE_CASE__ , num_proc=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
__a = field
__a = path_or_paths if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else {self.split: path_or_paths}
__a = Json(
cache_dir=SCREAMING_SNAKE_CASE__ , data_files=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , field=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
def __a ( self : str ):
'''simple docstring'''
if self.streaming:
__a = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__a = None
__a = None
__a = None
__a = None
self.builder.download_and_prepare(
download_config=SCREAMING_SNAKE_CASE__ , download_mode=SCREAMING_SNAKE_CASE__ , verification_mode=SCREAMING_SNAKE_CASE__ , base_path=SCREAMING_SNAKE_CASE__ , num_proc=self.num_proc , )
__a = self.builder.as_dataset(
split=self.split , verification_mode=SCREAMING_SNAKE_CASE__ , in_memory=self.keep_in_memory )
return dataset
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dataset , SCREAMING_SNAKE_CASE__ : Union[PathLike, BinaryIO] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , **SCREAMING_SNAKE_CASE__ : Optional[int] , ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
__a = dataset
__a = path_or_buf
__a = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__a = num_proc
__a = """utf-8"""
__a = to_json_kwargs
def __a ( self : Optional[int] ):
'''simple docstring'''
__a = self.to_json_kwargs.pop("""path_or_buf""" , SCREAMING_SNAKE_CASE__ )
__a = self.to_json_kwargs.pop("""orient""" , """records""" )
__a = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False )
__a = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True )
__a = self.to_json_kwargs.pop("""compression""" , SCREAMING_SNAKE_CASE__ )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , """wb""" , compression=SCREAMING_SNAKE_CASE__ ) as buffer:
__a = self._write(file_obj=SCREAMING_SNAKE_CASE__ , orient=SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , index=SCREAMING_SNAKE_CASE__ , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
""" was passed. Please provide a local path instead.""" )
__a = self._write(
file_obj=self.path_or_buf , orient=SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , index=SCREAMING_SNAKE_CASE__ , **self.to_json_kwargs )
return written
def __a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
__a , __a , __a , __a , __a = args
__a = query_table(
table=self.dataset.data , key=slice(SCREAMING_SNAKE_CASE__ , offset + self.batch_size ) , indices=self.dataset._indices , )
__a = batch.to_pandas().to_json(
path_or_buf=SCREAMING_SNAKE_CASE__ , orient=SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , index=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if not json_str.endswith("""\n""" ):
json_str += "\n"
return json_str.encode(self.encoding )
def __a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : BinaryIO , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Dict , ):
'''simple docstring'''
__a = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
__a = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(SCREAMING_SNAKE_CASE__ )
else:
__a , __a = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
written += file_obj.write(SCREAMING_SNAKE_CASE__ )
return written
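# Illustrative end-to-end sketch (column names and path are made up for the example):
# the writer above backs `Dataset.to_json`, which returns the size written.
def _to_json_sketch(path: str = "demo.jsonl") -> int:
    ds = Dataset.from_dict({"text": ["hello", "world"], "label": [0, 1]})
    return ds.to_json(path, lines=True, orient="records")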
| 700 |
'''simple docstring'''
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence: list) -> list:
    """simple docstring"""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = F'''The elements inside the sequence must contain only {colors} values'''
            raise ValueError(msg)
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by commas:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
    print(f"""{dutch_national_flag_sort(unsorted)}""")
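    # Quick illustrative check: zeros, then ones, then twos, grouped in a single pass.
    assert dutch_national_flag_sort([2, 0, 1, 2, 0, 1]) == [0, 0, 1, 1, 2, 2]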
| 201 | 0 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
__UpperCamelCase : Tuple = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
__UpperCamelCase : str = "\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n"
__UpperCamelCase : List[str] = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"
CHECKPOINT_URLS = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , )
    def _download_and_prepare(self, dl_manager):
        '''simple docstring'''
        if self.config_name == "default":
            logger.warning(
                """Using default BLEURT-Base checkpoint for sequence maximum length 128. """
                """You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" )
            self.config_name = "bleurt-base-128"
        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                F'{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}' )
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :int , __magic_name__ :str ):
'''simple docstring'''
a = self.scorer.score(references=__magic_name__ , candidates=__magic_name__ )
return {"scores": scores}
| 468 |
import numpy as np
import datasets
_UpperCamelCase = '''
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
_UpperCamelCase = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_UpperCamelCase = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCamelCase ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
    def _compute(self, X, reference_distribution):
        # convert inputs to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension")

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
| 243 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 706 |
"""simple docstring"""
import random
def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point
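# A small usage sketch (illustrative only): the sort happens in place.
#
#   data = [9, 2, 7, 1]
#   quick_sort_random(data, 0, len(data))
#   # data is now [1, 2, 7, 9]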
def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main() | 621 | 0 |
from jiwer import compute_measures
import datasets
lowerCamelCase : List[str] ='''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
lowerCamelCase : Optional[Any] ='''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
lowerCamelCase : Union[str, Any] ='''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a ( datasets.Metric ):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 228 |
def solution(max_base: int = 10, max_power: int = 22) -> int:
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power)
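# Worked example (from the Project Euler 63 statement): 16807 = 7**5 is a 5-digit
# number that is also a fifth power, and 134217728 = 8**9 is a 9-digit ninth power;
# both are counted by the generator expression above.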
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""") | 228 | 1 |
"""simple docstring"""
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 348 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def UpperCamelCase ( _A , _A , _A=0 ) -> Any:
# Format the message.
if name is None:
lowercase : Tuple = None
else:
lowercase : Any = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
lowercase : List[str] = fmt.format(_A )
# Print and recurse (if needed).
if isinstance(_A , _A ):
if msg is not None:
print(_A )
for k in val.keys():
recursive_print(_A , val[k] , spaces + 2 )
elif isinstance(_A , torch.Tensor ):
print(_A , """:""" , val.size() )
else:
print(_A , """:""" , _A )
def UpperCamelCase ( _A , _A , _A , _A , _A ) -> Optional[int]:
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
lowercase : Any = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
lowercase : str = (num_heads, hidden_size, num_splits) + input_shape[1:]
lowercase : Dict = param.view(*_A )
lowercase : str = param.transpose(0 , 2 )
lowercase : Optional[int] = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
lowercase : Union[str, Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
lowercase : Any = param.view(*_A )
lowercase : Optional[int] = param.transpose(0 , 1 ).contiguous()
lowercase : Any = param.view(*_A )
return param
def UpperCamelCase ( _A , _A , _A ) -> List[str]:
# The converted output model.
lowercase : str = {}
# old versions did not store training args
lowercase : Optional[int] = input_state_dict.get("""args""" , _A )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowercase : List[Any] = ds_args.padded_vocab_size
lowercase : int = ds_args.max_position_embeddings
lowercase : Optional[Any] = ds_args.hidden_size
lowercase : int = ds_args.num_layers
lowercase : Union[str, Any] = ds_args.num_attention_heads
lowercase : List[str] = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowercase : int = config.n_head
# The hidden_size per head.
lowercase : Union[str, Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowercase : List[str] = input_state_dict["""checkpoint_version"""]
else:
lowercase : List[str] = 0.0
# The model.
lowercase : Tuple = input_state_dict["""model"""]
# The language model.
lowercase : Optional[int] = model["""language_model"""]
# The embeddings.
lowercase : Optional[int] = lm["""embedding"""]
# The word embeddings.
lowercase : Union[str, Any] = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
lowercase : Tuple = word_embeddings[: config.vocab_size, :]
lowercase : Tuple = word_embeddings
# The position embeddings.
lowercase : Tuple = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowercase : Any = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match""" )
# Store the position embeddings.
lowercase : Optional[int] = pos_embeddings
# The transformer.
lowercase : str = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
lowercase : str = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
lowercase : Optional[Any] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowercase : int = layer_re.match(_A )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowercase : Optional[int] = int(m.group(1 ) )
# The name of the operation.
lowercase : Union[str, Any] = m.group(2 )
# Is it a weight or a bias?
lowercase : Dict = m.group(3 )
# The name of the layer.
lowercase : List[Any] = F"""transformer.h.{layer_idx}"""
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
lowercase : List[str] = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
lowercase : Dict = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
lowercase : Optional[int] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , _A , _A )
lowercase : List[str] = causal_mask
# Insert a "dummy" tensor for masked_bias.
lowercase : str = torch.tensor(-1e4 , dtype=torch.floataa )
lowercase : Tuple = masked_bias
lowercase : str = fix_query_key_value_ordering(_A , _A , 3 , _A , _A )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowercase : int = out_val.transpose(0 , 1 ).contiguous()
# Store.
lowercase : List[Any] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowercase : str = fix_query_key_value_ordering(_A , _A , 3 , _A , _A )
# Store. No change of shape.
lowercase : List[Any] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowercase : Optional[int] = megatron_to_transformers[op_name]
lowercase : int = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowercase : Union[str, Any] = megatron_to_transformers[op_name]
lowercase : str = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowercase : Dict = transformer["""final_layernorm.weight"""]
lowercase : Any = transformer["""final_layernorm.bias"""]
# For LM head, transformers' wants the matrix to weight embeddings.
lowercase : int = word_embeddings
# It should be done!
return output_state_dict
def UpperCamelCase ( ) -> int:
# Create the argument parser.
lowercase : Dict = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=_A , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=_A , help="""An optional config json file describing the pre-trained model.""" , )
lowercase : Dict = parser.parse_args()
# Extract the basename.
lowercase : Union[str, Any] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F"""Extracting PyTorch state dictionary from {args.path_to_checkpoint}""" )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
lowercase : Any = torch.load(_A , map_location="""cpu""" )
else:
lowercase : Tuple = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
lowercase : Dict = input_state_dict.get("""args""" , _A )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
lowercase : Optional[int] = """gelu_fast"""
elif ds_args.openai_gelu:
lowercase : int = """gelu_new"""
else:
lowercase : Tuple = """gelu"""
else:
# in the very early days this used to be "gelu_new"
lowercase : List[str] = """gelu_new"""
# Spell out all parameters in case the defaults change.
lowercase : Optional[Any] = GPTaConfig(
vocab_size=50_257 , n_positions=1_024 , n_embd=1_024 , n_layer=24 , n_head=16 , n_inner=4_096 , activation_function=_A , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=_A , summary_activation=_A , summary_proj_to_labels=_A , summary_first_dropout=0.1 , scale_attn_weights=_A , use_cache=_A , bos_token_id=50_256 , eos_token_id=50_256 , )
else:
lowercase : int = GPTaConfig.from_json_file(args.config_file )
lowercase : Optional[Any] = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
lowercase : List[str] = convert_megatron_checkpoint(_A , _A , _A )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(_A , _A )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
lowercase : Optional[Any] = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
lowercase : Tuple = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
lowercase : Optional[int] = ds_args.tokenizer_name_or_path
else:
raise ValueError(F"""Unrecognized tokenizer_type {tokenizer_type}""" )
else:
lowercase : Optional[Any] = """gpt2"""
lowercase : int = AutoTokenizer.from_pretrained(_A )
lowercase : Union[str, Any] = type(_A ).__name__
lowercase : Any = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(_A )
# Save tokenizer based on args
print(F"""Adding {tokenizer_class} tokenizer files""" )
tokenizer.save_pretrained(_A )
# Store the state_dict to file.
lowercase : Any = os.path.join(_A , """pytorch_model.bin""" )
print(F"""Saving checkpoint to \"{output_checkpoint_file}\"""" )
torch.save(_A , _A )
####################################################################################################
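# Example invocation (a sketch; the exact checkpoint filename/layout is an assumption):
#
#   python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure \
#       /path/to/megatron/release/mp_rank_00/model_optim_rng.pt
#
# An optional --config_file pointing at a GPT-2 style config.json can be supplied
# when the checkpoint does not carry its training args.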
if __name__ == "__main__":
main()
####################################################################################################
| 348 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
"""configuration_speecht5""": [
"""SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""",
"""SpeechT5Config""",
"""SpeechT5HifiGanConfig""",
],
"""feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""],
"""processing_speecht5""": ["""SpeechT5Processor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ["""SpeechT5Tokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
"""SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SpeechT5ForSpeechToText""",
"""SpeechT5ForSpeechToSpeech""",
"""SpeechT5ForTextToSpeech""",
"""SpeechT5Model""",
"""SpeechT5PreTrainedModel""",
"""SpeechT5HifiGan""",
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 |
"""simple docstring"""
_A = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def a__ ( ) -> None:
UpperCAmelCase__ : Optional[Any] = input("""Enter message: """ )
UpperCAmelCase__ : Optional[Any] = input("""Enter key [alphanumeric]: """ )
UpperCAmelCase__ : Optional[Any] = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
UpperCAmelCase__ : Optional[Any] = """encrypt"""
UpperCAmelCase__ : List[Any] = encrypt_message(lowerCAmelCase , lowerCAmelCase )
elif mode.lower().startswith("""d""" ):
UpperCAmelCase__ : Optional[Any] = """decrypt"""
UpperCAmelCase__ : Dict = decrypt_message(lowerCAmelCase , lowerCAmelCase )
print(F"""\n{mode.title()}ed message:""" )
print(lowerCAmelCase )
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> str:
return translate_message(lowerCAmelCase , lowerCAmelCase , """encrypt""" )
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> str:
return translate_message(lowerCAmelCase , lowerCAmelCase , """decrypt""" )
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> str:
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : str = key.upper()
for symbol in message:
UpperCAmelCase__ : Optional[Any] = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(lowerCAmelCase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(lowerCAmelCase ):
UpperCAmelCase__ : List[str] = 0
else:
translated.append(lowerCAmelCase )
return "".join(lowerCAmelCase )
if __name__ == "__main__":
main()
| 182 | 0 |
def circle_sort(collection: list) -> list:
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
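# A small usage sketch (illustrative): circle_sort returns the same list object, sorted.
#
#   circle_sort([0, 5, 3, 2, 2])   # -> [0, 2, 2, 3, 5]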
if __name__ == "__main__":
    unsorted = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in unsorted.split(",")]
print(circle_sort(unsorted)) | 703 |
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
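# Illustrative values (P = S * pf, Q = S * sqrt(1 - pf**2)):
#
#   real_power(100, 0.9)      # -> 90.0
#   reactive_power(100, 0.9)  # -> roughly 43.59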
if __name__ == "__main__":
import doctest
doctest.testmod() | 175 | 0 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __UpperCAmelCase ( __A , __A , __A , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = StableDiffusionControlNetImgaImgPipeline
_lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
_lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""} )
_lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case_ ( self ):
torch.manual_seed(0 )
__a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
__a = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
__a = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
__a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__a = CLIPTextModel(__A )
__a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__a = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def snake_case_ ( self , __A , __A=0 ):
if str(__A ).startswith("""mps""" ):
__a = torch.manual_seed(__A )
else:
__a = torch.Generator(device=__A ).manual_seed(__A )
__a = 2
__a = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , )
__a = floats_tensor(control_image.shape , rng=random.Random(__A ) ).to(__A )
__a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__a = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) )
__a = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def snake_case_ ( self ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def snake_case_ ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def snake_case_ ( self ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class __UpperCAmelCase ( __A , __A , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = StableDiffusionControlNetImgaImgPipeline
_lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
_lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_lowerCamelCase = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def snake_case_ ( self ):
torch.manual_seed(0 )
__a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(__A ):
if isinstance(__A , torch.nn.Convad ):
torch.nn.init.normal(m.weight )
m.bias.data.fill_(1.0 )
__a = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__A )
torch.manual_seed(0 )
__a = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__A )
torch.manual_seed(0 )
__a = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
__a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__a = CLIPTextModel(__A )
__a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__a = MultiControlNetModel([controlneta, controlneta] )
__a = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def snake_case_ ( self , __A , __A=0 ):
if str(__A ).startswith("""mps""" ):
__a = torch.manual_seed(__A )
else:
__a = torch.Generator(device=__A ).manual_seed(__A )
__a = 2
__a = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ),
]
__a = floats_tensor(control_image[0].shape , rng=random.Random(__A ) ).to(__A )
__a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__a = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) )
__a = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def snake_case_ ( self ):
__a = self.get_dummy_components()
__a = self.pipeline_class(**__A )
pipe.to(__A )
__a = 10.0
__a = 4
__a = self.get_dummy_inputs(__A )
__a = steps
__a = scale
__a = pipe(**__A )[0]
__a = self.get_dummy_inputs(__A )
__a = steps
__a = scale
__a = pipe(**__A , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
__a = self.get_dummy_inputs(__A )
__a = steps
__a = scale
__a = pipe(**__A , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
__a = self.get_dummy_inputs(__A )
__a = steps
__a = scale
__a = pipe(**__A , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def snake_case_ ( self ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def snake_case_ ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def snake_case_ ( self ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def snake_case_ ( self ):
__a = self.get_dummy_components()
__a = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__A )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ):
__a = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
__a = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , safety_checker=__A , controlnet=__A )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__A )
__a = torch.Generator(device="""cpu""" ).manual_seed(0 )
__a = """evil space-punk bird"""
__a = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) )
__a = load_image(
"""https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) )
__a = pipe(
__A , __A , control_image=__A , generator=__A , output_type="""np""" , num_inference_steps=50 , strength=0.6 , )
__a = output.images[0]
assert image.shape == (512, 512, 3)
__a = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
assert np.abs(expected_image - image ).max() < 9E-2
| 99 |
def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
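# For the adjacency list above, the longest path visits 5 vertices
# (for example 0 -> 2 -> 5 -> 6 -> 7), so the script prints 5.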
| 500 | 0 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    def __init__(self):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self):
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(value, 2) for value in final_signal]
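# For the default signals [2, 1, 2, -1] and [1, 2, 3, 4], the circular convolution
# should work out to [10.0, 10.0, 6.0, 14.0].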
if __name__ == "__main__":
doctest.testmod()
| 154 |
def solution(n: int = 600_851_475_143) -> int:
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
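# Worked example (from the Project Euler problem statement): the prime factors of
# 13195 are 5, 7, 13 and 29, so solution(13195) returns 29.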
if __name__ == "__main__":
print(f"{solution() = }")
| 154 | 1 |
"""simple docstring"""
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
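# Illustrative call: harmonic_series("5") -> ['1', '1/2', '1/3', '1/4', '1/5'].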
if __name__ == "__main__":
    nth_term = input("""Enter the last number (nth term) of the Harmonic Series""")
    print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
    print(harmonic_series(nth_term))
| 104 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_lowerCamelCase : Union[str, Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCamelCase : str = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
__A =self.diffusers_dir
shutil.copy(
os.path.join(lowercase__ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A ='''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def __UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__=None ):
'''simple docstring'''
__A =comment + f'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
__A =comment + f'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
__A =black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 )
__A =black.format_str(lowercase__ , mode=lowercase__ )
__A =os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(lowercase__ , '''w''' , newline='''\n''' ) as f:
f.write(lowercase__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowercase__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowercase__ )
with open(lowercase__ , '''r''' ) as f:
self.assertTrue(f.read() , lowercase__ )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(lowercase__ , lowercase__ )
def __UpperCamelCase ( self ):
'''simple docstring'''
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , lowercase__ , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , lowercase__ ) , )
# Copy consistency with a really long name
__A ='''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
f'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , f'''{long_class_name}SchedulerOutput''' , re.sub('''Bert''' , lowercase__ , lowercase__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , lowercase__ , overwrite_result=re.sub('''DDPM''' , '''Test''' , lowercase__ ) , )
| 184 | 0 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def _lowerCamelCase( a ):
if isinstance(a , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class snake_case__ :
def a__ ( self , lowerCamelCase , lowerCamelCase ):
pass
def a__ ( self ):
pass
def a__ ( self ):
pass
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = np.abs((a - b) ).max()
self.assertLessEqual(lowerCamelCase , lowerCamelCase , F"Difference between torch and flax is {diff} (>= {tol})." )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ):
__a = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase , lowerCamelCase )
__a = FlaxVisionTextDualEncoderModel(lowerCamelCase )
__a = model(input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ):
__a , __a = self.get_vision_text_model(lowerCamelCase , lowerCamelCase )
__a = {"vision_model": vision_model, "text_model": text_model}
__a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
__a = model(input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ):
__a , __a = self.get_vision_text_model(lowerCamelCase , lowerCamelCase )
__a = {"vision_model": vision_model, "text_model": text_model}
__a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
__a = model(input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase )
__a = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase )
__a = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
__a = model(input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase )
__a = after_output[0]
__a = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase , 1E-3 )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ):
__a , __a = self.get_vision_text_model(lowerCamelCase , lowerCamelCase )
__a = {"vision_model": vision_model, "text_model": text_model}
__a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
__a = model(
input_ids=lowerCamelCase , pixel_values=lowerCamelCase , attention_mask=lowerCamelCase , output_attentions=lowerCamelCase )
__a = output.vision_model_output.attentions
self.assertEqual(len(lowerCamelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__a = to_atuple(vision_model.config.image_size )
__a = to_atuple(vision_model.config.patch_size )
__a = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__a = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__a = output.text_model_output.attentions
self.assertEqual(len(lowerCamelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
pt_model.to(lowerCamelCase )
pt_model.eval()
# prepare inputs
__a = inputs_dict
__a = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
__a = pt_model(**lowerCamelCase ).to_tuple()
__a = fx_model(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCamelCase , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase )
__a = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase , from_pt=lowerCamelCase )
__a = fx_model_loaded(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCamelCase , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase )
__a = VisionTextDualEncoderModel.from_pretrained(lowerCamelCase , from_flax=lowerCamelCase )
pt_model_loaded.to(lowerCamelCase )
pt_model_loaded.eval()
with torch.no_grad():
__a = pt_model_loaded(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCamelCase , pt_output_loaded.numpy() , 4E-2 )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase , lowerCamelCase )
__a = VisionTextDualEncoderModel(lowerCamelCase )
__a = FlaxVisionTextDualEncoderModel(lowerCamelCase )
__a = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase )
__a = fx_state
self.check_pt_flax_equivalence(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase , lowerCamelCase )
__a = VisionTextDualEncoderModel(lowerCamelCase )
__a = FlaxVisionTextDualEncoderModel(lowerCamelCase )
__a = load_flax_weights_in_pytorch_model(lowerCamelCase , fx_model.params )
self.check_pt_flax_equivalence(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCamelCase )
def a__ ( self ):
__a = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase )
def a__ ( self ):
__a = self.prepare_config_and_inputs()
self.check_save_load(**lowerCamelCase )
def a__ ( self ):
__a = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCamelCase )
@is_pt_flax_cross_test
def a__ ( self ):
__a = self.prepare_config_and_inputs()
__a = config_inputs_dict.pop("vision_config" )
__a = config_inputs_dict.pop("text_config" )
__a = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCamelCase , lowerCamelCase , lowerCamelCase )
self.check_equivalence_flax_to_pt(lowerCamelCase , lowerCamelCase , lowerCamelCase )
@slow
def a__ ( self ):
__a , __a = self.get_pretrained_model_and_inputs()
__a = model_a(**lowerCamelCase )
__a = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCamelCase )
__a = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
__a = model_a(**lowerCamelCase )
__a = after_outputs[0]
__a = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase , 1E-5 )
@require_flax
class snake_case__ ( snake_case_, unittest.TestCase ):
def a__ ( self ):
__a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=lowerCamelCase , text_from_pt=lowerCamelCase , )
__a = 13
__a = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__a = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__a = random_attention_mask([batch_size, 4] )
__a = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def a__ ( self , lowerCamelCase , lowerCamelCase ):
__a = FlaxViTModel(lowerCamelCase )
__a = FlaxBertModel(lowerCamelCase )
return vision_model, text_model
def a__ ( self ):
__a = FlaxViTModelTester(self )
__a = FlaxBertModelTester(self )
__a = vit_model_tester.prepare_config_and_inputs()
__a = bert_model_tester.prepare_config_and_inputs()
__a , __a = vision_config_and_inputs
__a , __a , __a , __a = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class snake_case__ ( snake_case_, unittest.TestCase ):
def a__ ( self ):
__a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=lowerCamelCase , text_from_pt=lowerCamelCase , )
__a = 13
__a = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__a = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__a = random_attention_mask([batch_size, 4] )
__a = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def a__ ( self , lowerCamelCase , lowerCamelCase ):
__a = FlaxCLIPVisionModel(lowerCamelCase )
__a = FlaxBertModel(lowerCamelCase )
return vision_model, text_model
def a__ ( self ):
__a = FlaxCLIPVisionModelTester(self )
__a = FlaxBertModelTester(self )
__a = clip_model_tester.prepare_config_and_inputs()
__a = bert_model_tester.prepare_config_and_inputs()
__a , __a = vision_config_and_inputs
__a , __a , __a , __a = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class snake_case__ ( unittest.TestCase ):
@slow
def a__ ( self ):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0 )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"] , images=image , padding=True , return_tensors="np" )
        outputs = model(**inputs )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.228_4727, 0.310_4122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image , expected_logits , atol=1E-3 ) )
| 67 | """simple docstring"""
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__( self ):
        self.elements = []
        self.set = set()
    def minkey( self ):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf" )
    def empty( self ):
        return len(self.elements ) == 0
    def put( self , item , priority ):
        if item not in self.set:
            heapq.heappush(self.elements , (priority, item) )
            self.set.add(item )
        else:
            # update: pop entries until the item is found, then push it back with the new priority
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pri, x) )
                (pri, x) = heapq.heappop(self.elements )
            temp.append((priority, item) )
            for pro, xxx in temp:
                heapq.heappush(self.elements , (pro, xxx) )
    def remove_element( self , item ):
        if item in self.set:
            self.set.remove(item )
            temp = []
            (pro, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pro, x) )
                (pro, x) = heapq.heappop(self.elements )
            for prito, yyy in temp:
                heapq.heappush(self.elements , (prito, yyy) )
    def top_show( self ):
        return self.elements[0][1]
    def get( self ):
        (priority, item) = heapq.heappop(self.elements )
        self.set.remove(item )
        return (priority, item)
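# Illustrative usage sketch (not part of the original module): the multi-heuristic A*
# below keeps one such queue per heuristic and calls put() both to insert a state and
# to re-prioritise a state that is already queued. Positions and priorities are made up.
def _priority_queue_demo():
    queue = PriorityQueue()
    queue.put((0, 0), 5.0)
    queue.put((1, 1), 2.0)
    queue.put((0, 0), 1.0)  # a second put() for a queued item updates its priority
    assert queue.minkey() == 1.0
    assert queue.top_show() == (0, 0)
    return queue.get()  # -> (1.0, (0, 0))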
def consistent_heuristic( p , goal ):
    # euclidean distance
    a = np.array(p )
    b = np.array(goal )
    return np.linalg.norm(a - b )
def heuristic_2( p , goal ):
    # integer division by time variable
    return consistent_heuristic(p , goal ) // t
def heuristic_1( p , goal ):
    # manhattan distance
    return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def key( start , i , goal , g_function ):
    ans = g_function[start] + W1 * heuristics[i](start , goal )
    return ans
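# Illustrative sketch (not part of the original module): what the three heuristics above
# return for one sample pair of positions. heuristic_2 depends on the global time counter
# t initialised further down in this file; the sample positions are made up.
def _heuristic_demo(p=(0, 0), goal_pos=(3, 4)):
    return {
        "euclidean (consistent_heuristic)": consistent_heuristic(p, goal_pos),  # 5.0
        "manhattan (heuristic_1)": heuristic_1(p, goal_pos),  # 7
        "euclidean // t (heuristic_2)": heuristic_2(p, goal_pos),  # 5.0 while t == 1
    }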
def do_something( back_pointer , goal , start ):
__a = np.chararray((n, n) )
for i in range(a ):
for j in range(a ):
__a = "*"
for i in range(a ):
for j in range(a ):
if (j, (n - 1) - i) in blocks:
__a = "#"
__a = "-"
__a = back_pointer[goal]
while x != start:
((__a) , (__a)) = x
# print(x)
__a = "-"
__a = back_pointer[x]
__a = "-"
for i in range(a ):
for j in range(a ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=" " )
print("<-- End position" , end=" " )
else:
print(grid[i][j] , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
print("PATH TAKEN BY THE ALGORITHM IS:-" )
__a = back_pointer[goal]
while x != start:
print(a , end=" " )
__a = back_pointer[x]
print(a )
sys.exit()
def valid( p ):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state( s , j , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer ):
for itera in range(a ):
open_list[itera].remove_element(a )
# print("s", s)
# print("j", j)
((__a) , (__a)) = s
__a = (x - 1, y)
__a = (x + 1, y)
__a = (x, y + 1)
__a = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(a ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(a )
__a = -1
__a = float("inf" )
if valid(a ) and g_function[neighbours] > g_function[s] + 1:
__a = g_function[s] + 1
__a = s
if neighbours not in close_list_anchor:
open_list[0].put(a , key(a , 0 , a , a ) )
if neighbours not in close_list_inad:
for var in range(1 , a ):
if key(a , a , a , a ) <= Wa * key(
a , 0 , a , a ):
open_list[j].put(
a , key(a , a , a , a ) )
def make_common_ground( ):
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)
t = 1
def multi_a_star( start , goal , n_heuristic ):
__a = {start: 0, goal: float("inf" )}
__a = {start: -1, goal: -1}
__a = []
__a = set()
for i in range(a ):
open_list.append(PriorityQueue() )
open_list[i].put(a , key(a , a , a , a ) )
__a = []
__a = []
while open_list[0].minkey() < float("inf" ):
for i in range(1 , a ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("inf" ):
do_something(a , a , a )
else:
__a , __a = open_list[i].top_show()
visited.add(a )
expand_state(
a , a , a , a , a , a , a , a , )
close_list_inad.append(a )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("inf" ):
do_something(a , a , a )
else:
__a = open_list[0].top_show()
visited.add(a )
expand_state(
a , 0 , a , a , a , a , a , a , )
close_list_anchor.append(a )
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(a ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 67 | 1 |
'''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
__lowercase : List[Any] = '''bert-base-cased'''
__lowercase : Optional[int] = '''fp16'''
__lowercase : Any = '''bf16'''
__lowercase : Dict = [FPaa, BFaa]
@require_fsdp
@require_cuda
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
lowerCamelCase_ : Any = dict(
ACCELERATE_USE_FSDP='''true''' , MASTER_ADDR='''localhost''' , MASTER_PORT='''10999''' , RANK='''0''' , LOCAL_RANK='''0''' , WORLD_SIZE='''1''' , )
def UpperCAmelCase__ (self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(A ):
lowerCamelCase_ : List[str] = self.dist_env.copy()
lowerCamelCase_ : List[Any] = F"""{i + 1}"""
lowerCamelCase_ : Optional[Any] = strategy
with mockenv_context(**A ):
lowerCamelCase_ : Dict = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def UpperCAmelCase__ (self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(A ):
lowerCamelCase_ : Dict = self.dist_env.copy()
lowerCamelCase_ : Optional[int] = prefetch_policy
with mockenv_context(**A ):
lowerCamelCase_ : Union[str, Any] = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def UpperCAmelCase__ (self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(A ):
lowerCamelCase_ : Union[str, Any] = self.dist_env.copy()
lowerCamelCase_ : Any = state_dict_type
with mockenv_context(**A ):
lowerCamelCase_ : List[Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = AutoModel.from_pretrained(A )
for policy in FSDP_AUTO_WRAP_POLICY:
lowerCamelCase_ : int = self.dist_env.copy()
lowerCamelCase_ : Tuple = policy
if policy == "TRANSFORMER_BASED_WRAP":
lowerCamelCase_ : Optional[int] = '''BertLayer'''
elif policy == "SIZE_BASED_WRAP":
lowerCamelCase_ : Union[str, Any] = '''2000'''
with mockenv_context(**A ):
lowerCamelCase_ : str = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(A )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
lowerCamelCase_ : List[Any] = self.dist_env.copy()
lowerCamelCase_ : Any = '''TRANSFORMER_BASED_WRAP'''
lowerCamelCase_ : List[Any] = '''T5Layer'''
with mockenv_context(**A ):
lowerCamelCase_ : Optional[int] = FullyShardedDataParallelPlugin()
with self.assertRaises(A ) as cm:
fsdp_plugin.set_auto_wrap_policy(A )
self.assertTrue('''Could not find the transformer layer class to wrap in the model.''' in str(cm.exception ) )
lowerCamelCase_ : Any = self.dist_env.copy()
lowerCamelCase_ : Dict = '''SIZE_BASED_WRAP'''
lowerCamelCase_ : Dict = '''0'''
with mockenv_context(**A ):
lowerCamelCase_ : List[str] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(A )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def UpperCAmelCase__ (self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
lowerCamelCase_ : str = self.dist_env.copy()
lowerCamelCase_ : Optional[int] = mp_dtype
with mockenv_context(**A ):
lowerCamelCase_ : Union[str, Any] = Accelerator()
if mp_dtype == "fp16":
lowerCamelCase_ : Any = torch.floataa
elif mp_dtype == "bf16":
lowerCamelCase_ : Optional[Any] = torch.bfloataa
lowerCamelCase_ : Tuple = MixedPrecision(param_dtype=A , reduce_dtype=A , buffer_dtype=A )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , A )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , A ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(A )
def UpperCAmelCase__ (self ):
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
lowerCamelCase_ : Any = self.dist_env.copy()
lowerCamelCase_ : List[str] = str(A ).lower()
with mockenv_context(**A ):
lowerCamelCase_ : List[Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=A ) )
@require_fsdp
@require_multi_gpu
@slow
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
lowerCamelCase_ : Optional[int] = 0.82
lowerCamelCase_ : List[str] = [
'''fsdp_shard_grad_op_transformer_based_wrap''',
'''fsdp_full_shard_transformer_based_wrap''',
]
lowerCamelCase_ : int = {
'''multi_gpu_fp16''': 3_2_0_0,
'''fsdp_shard_grad_op_transformer_based_wrap_fp16''': 2_0_0_0,
'''fsdp_full_shard_transformer_based_wrap_fp16''': 1_9_0_0,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
lowerCamelCase_ : Tuple = 1_6_0
lowerCamelCase_ : Tuple = 1_6_0
lowerCamelCase_ : Optional[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase_ : List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps'''] )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = os.path.join(self.test_scripts_folder , '''test_performance.py''' )
lowerCamelCase_ : List[str] = ['''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp''']
for config in self.performance_configs:
lowerCamelCase_ : Any = cmd.copy()
for i, strategy in enumerate(A ):
if strategy.lower() in config:
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
break
if "fp32" in config:
cmd_config.append('''--mixed_precision=no''' )
else:
cmd_config.append('''--mixed_precision=fp16''' )
if "cpu_offload" in config:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
F"""--performance_lower_bound={self.performance_lower_bound}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A , env=os.environ.copy() )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = os.path.join(self.test_scripts_folder , '''test_checkpointing.py''' )
lowerCamelCase_ : Optional[int] = [
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
'''--use_fsdp''',
'''--mixed_precision=fp16''',
'''--fsdp_transformer_layer_cls_to_wrap=BertLayer''',
]
for i, strategy in enumerate(A ):
lowerCamelCase_ : str = cmd.copy()
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
if strategy != "FULL_SHARD":
continue
lowerCamelCase_ : Dict = len(A )
for state_dict_type in FSDP_STATE_DICT_TYPE:
lowerCamelCase_ : Tuple = cmd_config[:state_dict_config_index]
cmd_config.append(F"""--fsdp_state_dict_type={state_dict_type}""" )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
'''--partial_train_epoch=1''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A , env=os.environ.copy() )
lowerCamelCase_ : str = cmd_config[:-1]
lowerCamelCase_ : List[str] = os.path.join(self.tmpdir , '''epoch_0''' )
cmd_config.extend(
[
F"""--resume_from_checkpoint={resume_from_checkpoint}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A , env=os.environ.copy() )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = os.path.join(self.test_scripts_folder , '''test_peak_memory_usage.py''' )
lowerCamelCase_ : Optional[Any] = [
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
lowerCamelCase_ : Tuple = cmd.copy()
if "fp16" in spec:
cmd_config.extend(['''--mixed_precision=fp16'''] )
else:
cmd_config.extend(['''--mixed_precision=no'''] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['''--use_fsdp'''] )
for i, strategy in enumerate(A ):
if strategy.lower() in spec:
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
break
if "cpu_offload" in spec:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
F"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
F"""--n_train={self.n_train}""",
F"""--n_val={self.n_val}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A , env=os.environ.copy() )
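# Illustrative sketch (not part of the original tests): the pattern the unit tests above
# rely on - FSDP plugin options are supplied through environment variables inside a
# mocked environment. The exact variable names and values here are assumptions.
def _example_fsdp_plugin_from_env():
    env = {
        "ACCELERATE_USE_FSDP": "true",
        "FSDP_SHARDING_STRATEGY": "1",  # assumed to map to FULL_SHARD
        "FSDP_AUTO_WRAP_POLICY": "TRANSFORMER_BASED_WRAP",
        "FSDP_TRANSFORMER_CLS_TO_WRAP": "BertLayer",
        "MASTER_ADDR": "localhost",
        "MASTER_PORT": "10999",
        "RANK": "0",
        "LOCAL_RANK": "0",
        "WORLD_SIZE": "1",
    }
    with mockenv_context(**env):
        return FullyShardedDataParallelPlugin()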
| 422 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class __lowercase ( unittest.TestCase ):
def __init__(self , A , A=7 , A=3 , A=3_0 , A=4_0_0 , A=True , A=None , A=True , A=[0.5, 0.5, 0.5] , A=[0.5, 0.5, 0.5] , A=True , A=1 / 2_5_5 , A=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCamelCase_ : int = size if size is not None else {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3}
lowerCamelCase_ : Any = parent
lowerCamelCase_ : Tuple = batch_size
lowerCamelCase_ : Union[str, Any] = num_channels
lowerCamelCase_ : List[str] = min_resolution
lowerCamelCase_ : List[Any] = max_resolution
lowerCamelCase_ : Tuple = do_resize
lowerCamelCase_ : Dict = size
lowerCamelCase_ : Optional[int] = do_normalize
lowerCamelCase_ : Union[str, Any] = image_mean
lowerCamelCase_ : str = image_std
lowerCamelCase_ : List[Any] = do_rescale
lowerCamelCase_ : str = rescale_factor
lowerCamelCase_ : Optional[int] = do_pad
def UpperCAmelCase__ (self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['''shortest_edge'''] * h / w )
                expected_width = self.size['''shortest_edge''']
            elif w > h:
                expected_height = self.size['''shortest_edge''']
                expected_width = int(self.size['''shortest_edge'''] * w / h )
            else:
                expected_height = self.size['''shortest_edge''']
                expected_width = self.size['''shortest_edge''']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = DetaImageProcessor if is_vision_available() else None
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = DetaImageProcessingTester(self )
@property
def UpperCAmelCase__ (self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , '''image_mean''' ) )
self.assertTrue(hasattr(A , '''image_std''' ) )
self.assertTrue(hasattr(A , '''do_normalize''' ) )
self.assertTrue(hasattr(A , '''do_resize''' ) )
self.assertTrue(hasattr(A , '''do_rescale''' ) )
self.assertTrue(hasattr(A , '''do_pad''' ) )
self.assertTrue(hasattr(A , '''size''' ) )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , A )
def UpperCAmelCase__ (self ):
pass
def UpperCAmelCase__ (self ):
# Initialize image_processing
lowerCamelCase_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
lowerCamelCase_ : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : Optional[Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_, lowerCamelCase_ : Any = self.image_processor_tester.get_expected_values(A , batched=A )
lowerCamelCase_ : Tuple = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ (self ):
# Initialize image_processing
lowerCamelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
lowerCamelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : Union[str, Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ : List[str] = image_processing(A , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : Dict = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ (self ):
# Initialize image_processing
lowerCamelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
lowerCamelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : Any = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ : Optional[Any] = image_processing(A , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : List[Any] = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCAmelCase__ (self ):
# prepare image and target
lowerCamelCase_ : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
lowerCamelCase_ : Any = json.loads(f.read() )
lowerCamelCase_ : str = {'''image_id''': 3_9_7_6_9, '''annotations''': target}
# encode them
lowerCamelCase_ : Optional[Any] = DetaImageProcessor()
lowerCamelCase_ : Optional[int] = image_processing(images=A , annotations=A , return_tensors='''pt''' )
# verify pixel values
lowerCamelCase_ : Dict = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
lowerCamelCase_ : Dict = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1E-4 ) )
# verify area
lowerCamelCase_ : Dict = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
lowerCamelCase_ : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
lowerCamelCase_ : int = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1E-3 ) )
# verify image_id
lowerCamelCase_ : int = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
lowerCamelCase_ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
lowerCamelCase_ : List[str] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify orig_size
lowerCamelCase_ : Optional[int] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
lowerCamelCase_ : List[str] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
@slow
def UpperCAmelCase__ (self ):
# prepare image, target and masks_path
lowerCamelCase_ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
lowerCamelCase_ : Tuple = json.loads(f.read() )
lowerCamelCase_ : Tuple = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target}
lowerCamelCase_ : List[str] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowerCamelCase_ : Any = DetaImageProcessor(format='''coco_panoptic''' )
lowerCamelCase_ : Dict = image_processing(images=A , annotations=A , masks_path=A , return_tensors='''pt''' )
# verify pixel values
lowerCamelCase_ : Dict = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
lowerCamelCase_ : Dict = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1E-4 ) )
# verify area
lowerCamelCase_ : Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
lowerCamelCase_ : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
lowerCamelCase_ : int = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1E-3 ) )
# verify image_id
lowerCamelCase_ : Union[str, Any] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
lowerCamelCase_ : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
lowerCamelCase_ : Dict = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify masks
lowerCamelCase_ : Tuple = 8_2_2_8_7_3
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A )
# verify orig_size
lowerCamelCase_ : Any = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
lowerCamelCase_ : Tuple = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
| 422 | 1 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
def decorator(__UpperCamelCase : str ):
snake_case_ : Dict = getattr(__UpperCamelCase , """handle_key""" , [] )
handle += [key]
setattr(__UpperCamelCase , """handle_key""" , __UpperCamelCase )
return func
return decorator
def __lowerCAmelCase ( *__UpperCamelCase : List[str] ):
'''simple docstring'''
def decorator(__UpperCamelCase : Optional[int] ):
snake_case_ : Optional[int] = getattr(__UpperCamelCase , """handle_key""" , [] )
handle += keys
setattr(__UpperCamelCase , """handle_key""" , __UpperCamelCase )
return func
return decorator
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __new__( cls , _lowercase , _lowercase , _lowercase ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = super().__new__(cls , _lowercase , _lowercase , _lowercase )
if not hasattr(_lowercase , """key_handler""" ):
setattr(_lowercase , """key_handler""" , {} )
setattr(_lowercase , """handle_input""" , KeyHandler.handle_input )
for value in attrs.values():
snake_case_ : List[str] = getattr(_lowercase , """handle_key""" , [] )
for key in handled_keys:
snake_case_ : List[str] = value
return new_cls
@staticmethod
def UpperCAmelCase__ ( cls ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = get_character()
if char != KEYMAP["undefined"]:
snake_case_ : int = ord(_lowercase )
snake_case_ : Tuple = cls.key_handler.get(_lowercase )
if handler:
snake_case_ : Union[str, Any] = char
return handler(cls )
else:
return None
def __lowerCAmelCase ( cls : Optional[int] ):
'''simple docstring'''
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 21 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
args = parser.parse_args()
device = '''cpu'''
prompt = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'''generator''': generator}
if args.steps is not None:
    generate_kwargs['''num_inference_steps'''] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 21 | 1 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser ):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter ):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("""--make-reports""" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 325 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
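# Illustrative sketch (not part of the original example): the core pattern this script
# demonstrates. `find_executable_batch_size` re-runs the decorated function with a halved
# batch size whenever it raises an out-of-memory style error; the toy threshold below and
# the exact error message it reacts to are assumptions.
@find_executable_batch_size(starting_batch_size=128)
def _toy_loop(batch_size):
    if batch_size > 32:  # pretend anything above 32 samples does not fit in memory
        raise RuntimeError("CUDA out of memory.")
    return batch_size  # first successful attempt would return 32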
__A = 16
__A = 32
def _A ( lowercase__ , lowercase__ = 16 ):
lowercase__ = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowercase__ = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ = 16
elif accelerator.mixed_precision != "no":
lowercase__ = 8
else:
lowercase__ = None
return tokenizer.pad(
lowercase__ , padding="""longest""" , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowercase__ = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__A = mocked_dataloaders # noqa: F811
def _A ( lowercase__ , lowercase__ ):
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase__ ) == "1":
lowercase__ = 2
# Initialize accelerator
lowercase__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config["""lr"""]
lowercase__ = int(config["""num_epochs"""] )
lowercase__ = int(config["""seed"""] )
lowercase__ = int(config["""batch_size"""] )
lowercase__ = evaluate.load("""glue""" , """mrpc""" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=lowercase__ )
def inner_training_loop(lowercase__ ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ = AdamW(params=model.parameters() , lr=lowercase__ )
lowercase__ , lowercase__ = get_dataloaders(lowercase__ , lowercase__ )
# Instantiate scheduler
lowercase__ = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=100 , num_training_steps=(len(lowercase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Now we train the model
for epoch in range(lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ = model(**lowercase__ )
lowercase__ = outputs.loss
accelerator.backward(lowercase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**lowercase__ )
lowercase__ = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , lowercase__ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def _A ( ):
lowercase__ = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowercase__ , default=lowercase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowercase__ = parser.parse_args()
lowercase__ = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
| 325 | 1 |
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
lowercase_ = logging.get_logger(__name__)
def run_with_tf_optimizations( do_eager_mode , use_xla ):
    '''simple docstring'''
    def run_func(func ):
        @wraps(func )
        def run_in_eager_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    'Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.' )
            return run_in_eager_mode
        else:
            return run_in_graph_mode
    return run_func
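# Illustrative sketch (not part of the original file): how the factory above is meant to be
# used - the same closure either stays in eager mode or gets compiled with tf.function
# (optionally with XLA). The tiny matmul benchmark below is hypothetical.
def _toy_tf_benchmark(use_xla=False):
    x = tf.random.uniform((8, 8))

    @run_with_tf_optimizations(do_eager_mode=False, use_xla=use_xla)
    def _matmul():
        return tf.matmul(x, x)

    return _matmul()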
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =random.Random()
_lowerCAmelCase =[rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(A__ , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class SCREAMING_SNAKE_CASE ( __lowerCamelCase):
"""simple docstring"""
lowercase : TensorFlowBenchmarkArguments
lowercase : PretrainedConfig
lowercase : str = "TensorFlow"
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
return tf.__version__
def UpperCamelCase__ ( self , __A , __A , __A ) -> Union[str, Any]:
# initialize GPU on separate process
_lowerCAmelCase =self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
_lowerCAmelCase =self._prepare_inference_func(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return self._measure_speed(_inference )
def UpperCamelCase__ ( self , __A , __A , __A ) -> List[str]:
_lowerCAmelCase =self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
_lowerCAmelCase =self._prepare_train_func(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return self._measure_speed(_train )
def UpperCamelCase__ ( self , __A , __A , __A ) -> Tuple:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , UpperCamelCase_ )
_lowerCAmelCase =self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
_lowerCAmelCase =self._prepare_inference_func(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return self._measure_memory(_inference )
def UpperCamelCase__ ( self , __A , __A , __A ) -> List[str]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , UpperCamelCase_ )
_lowerCAmelCase =self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
_lowerCAmelCase =self._prepare_train_func(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return self._measure_memory(_train )
def UpperCamelCase__ ( self , __A , __A , __A ) -> List[Any]:
_lowerCAmelCase =self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.' )
_lowerCAmelCase =(
hasattr(UpperCamelCase_ , 'architectures' )
and isinstance(config.architectures , UpperCamelCase_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCAmelCase ='TF' + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCAmelCase =__import__('transformers' , fromlist=[model_class] )
_lowerCAmelCase =getattr(UpperCamelCase_ , UpperCamelCase_ )
_lowerCAmelCase =model_cls(UpperCamelCase_ )
except ImportError:
raise ImportError(
F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
_lowerCAmelCase =TF_MODEL_MAPPING[config.__class__](UpperCamelCase_ )
# encoder-decoder has vocab size saved differently
_lowerCAmelCase =config.vocab_size if hasattr(UpperCamelCase_ , 'vocab_size' ) else config.encoder.vocab_size
_lowerCAmelCase =random_input_ids(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ , training=UpperCamelCase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(UpperCamelCase_ , training=UpperCamelCase_ )
_lowerCAmelCase =encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def UpperCamelCase__ ( self , __A , __A , __A ) -> Any:
_lowerCAmelCase =self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.' )
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.' )
_lowerCAmelCase =(
hasattr(UpperCamelCase_ , 'architectures' )
and isinstance(config.architectures , UpperCamelCase_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCAmelCase ='TF' + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCAmelCase =__import__('transformers' , fromlist=[model_class] )
_lowerCAmelCase =getattr(UpperCamelCase_ , UpperCamelCase_ )
_lowerCAmelCase =model_cls(UpperCamelCase_ )
except ImportError:
raise ImportError(
F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
_lowerCAmelCase =TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](UpperCamelCase_ )
# encoder-decoder has vocab size saved differently
_lowerCAmelCase =config.vocab_size if hasattr(UpperCamelCase_ , 'vocab_size' ) else config.encoder.vocab_size
_lowerCAmelCase =random_input_ids(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
_lowerCAmelCase =model(UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ , labels=UpperCamelCase_ , training=UpperCamelCase_ )[0]
_lowerCAmelCase =tf.gradients(UpperCamelCase_ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
_lowerCAmelCase =model(UpperCamelCase_ , labels=UpperCamelCase_ , training=UpperCamelCase_ )[0]
_lowerCAmelCase =tf.gradients(UpperCamelCase_ , model.trainable_variables )
return gradients
_lowerCAmelCase =encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def UpperCamelCase__ ( self , __A ) -> Union[str, Any]:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info('Do inference on TPU. Running model 5 times to stabilize compilation' )
timeit.repeat(UpperCamelCase_ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_lowerCAmelCase =timeit.repeat(
UpperCamelCase_ , repeat=self.args.repeat , number=10 , )
return min(UpperCamelCase_ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
def UpperCamelCase__ ( self , __A ) -> List[Any]:
logger.info(
'Note that TensorFlow allocates more memory than '
'it might need to speed up computation. '
'The memory reported here corresponds to the memory '
'reported by `nvidia-smi`, which can vary depending '
'on total available memory on the GPU that is used.' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
' consumption line by line.' )
_lowerCAmelCase =start_memory_tracing('transformers' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
' with `args.memory=False`' )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'py3nvml not installed, we won\'t log GPU memory usage. '
'Install py3nvml (pip install py3nvml) to log information about GPU.' )
_lowerCAmelCase ='N/A'
else:
logger.info(
'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
' running on the same GPU.' )
# init nvml
nvml.nvmlInit()
func()
_lowerCAmelCase =nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_lowerCAmelCase =nvml.nvmlDeviceGetMemoryInfo(UpperCamelCase_ )
_lowerCAmelCase =meminfo.used
_lowerCAmelCase =Memory(UpperCamelCase_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
' TensorFlow.' )
_lowerCAmelCase =None
else:
_lowerCAmelCase =measure_peak_memory_cpu(UpperCamelCase_ )
_lowerCAmelCase =Memory(UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
_lowerCAmelCase =stop_memory_tracing(UpperCamelCase_ )
if memory is None:
_lowerCAmelCase =summary.total
else:
_lowerCAmelCase =None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
return "N/A", None
| 713 | '''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Tuple = 'blip_2_vision_model'
def __init__( self , __A=1408 , __A=6144 , __A=39 , __A=16 , __A=224 , __A=14 , __A="gelu" , __A=0.00_001 , __A=0.0 , __A=1E-10 , __A=True , **__A , ) -> int:
super().__init__(**__A )
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =patch_size
_lowerCAmelCase =image_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =hidden_act
_lowerCAmelCase =qkv_bias
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
                    F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 'blip_2_qformer'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.02 , __A=1E-12 , __A=0 , __A="absolute" , __A=2 , __A=1408 , **__A , ) -> List[str]:
super().__init__(pad_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =cross_attention_frequency
_lowerCAmelCase =encoder_hidden_size
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
                    F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Optional[int] = 'blip-2'
lowercase : Any = True
def __init__( self , __A=None , __A=None , __A=None , __A=32 , **__A ) -> int:
super().__init__(**__A )
if vision_config is None:
_lowerCAmelCase ={}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
_lowerCAmelCase ={}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
_lowerCAmelCase ={}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowerCAmelCase =BlipaVisionConfig(**__A )
_lowerCAmelCase =BlipaQFormerConfig(**__A )
_lowerCAmelCase =text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowerCAmelCase =CONFIG_MAPPING[text_model_type](**__A )
_lowerCAmelCase =self.text_config.tie_word_embeddings
_lowerCAmelCase =self.text_config.is_encoder_decoder
_lowerCAmelCase =num_query_tokens
_lowerCAmelCase =self.vision_config.hidden_size
_lowerCAmelCase =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowerCAmelCase =1.0
_lowerCAmelCase =0.02
@classmethod
def UpperCamelCase__ ( cls , __A , __A , __A , **__A , ) -> Any:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , )
    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
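# Hypothetical usage sketch (the names follow the upstream transformers Blip2 API rather
# than the renamed classes in this file):
#   config = Blip2Config.from_vision_qformer_text_configs(vision_cfg, qformer_cfg, text_cfg)
#   round_tripped = config.to_dict()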
| 58 | 0 |
import heapq
import sys
import numpy as np
__a = tuple[int, int]
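# Multi-heuristic A* (MHA*): one "anchor" open list is ordered by the consistent heuristic,
# the remaining open lists by the other (possibly inadmissible) heuristics. A state is only
# expanded from an inadmissible queue while its smallest key stays within a constant factor
# of the anchor queue's smallest key (the check inside multi_a_star below).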
class PriorityQueue:
    def __init__(self) -> None:
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float('inf')

    def empty(self) -> bool:
        return len(self.elements) == 0

    def put(self, item, priority) -> None:
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # the item is already queued: rebuild its heap entry with the new priority
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item) -> None:
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def top_and_keep(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def a ( snake_case__: TPos , snake_case__: TPos ):
'''simple docstring'''
# euclidean distance
lowercase_ = np.array(snake_case__ )
lowercase_ = np.array(snake_case__ )
return np.linalg.norm(a - b )
def a ( snake_case__: TPos , snake_case__: TPos ):
'''simple docstring'''
# integer division by time variable
return consistent_heuristic(snake_case__ , snake_case__ ) // t
def a ( snake_case__: TPos , snake_case__: TPos ):
'''simple docstring'''
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def a ( snake_case__: TPos , snake_case__: int , snake_case__: TPos , snake_case__: dict[TPos, float] ):
'''simple docstring'''
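    # key(s, i) = g(s) + W1 * h_i(s, goal): the priority of state s in the i-th open list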
lowercase_ = g_function[start] + Wa * heuristics[i](snake_case__ , snake_case__ )
return ans
def a ( snake_case__: List[Any] , snake_case__: Dict , snake_case__: List[str] ):
'''simple docstring'''
lowercase_ = np.chararray((n, n) )
for i in range(snake_case__ ):
for j in range(snake_case__ ):
lowercase_ = '''*'''
for i in range(snake_case__ ):
for j in range(snake_case__ ):
if (j, (n - 1) - i) in blocks:
lowercase_ = '''#'''
lowercase_ = '''-'''
lowercase_ = back_pointer[goal]
while x != start:
((lowercase_) , (lowercase_)) = x
# print(x)
lowercase_ = '''-'''
lowercase_ = back_pointer[x]
lowercase_ = '''-'''
for i in range(snake_case__ ):
for j in range(snake_case__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
lowercase_ = back_pointer[goal]
while x != start:
print(snake_case__ , end=''' ''' )
lowercase_ = back_pointer[x]
print(snake_case__ )
sys.exit()
def a ( snake_case__: TPos ):
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def a ( snake_case__: Optional[Any] , snake_case__: List[str] , snake_case__: Dict , snake_case__: str , snake_case__: Dict , snake_case__: Optional[Any] , snake_case__: List[Any] , snake_case__: Optional[int] , ):
'''simple docstring'''
for itera in range(snake_case__ ):
open_list[itera].remove_element(snake_case__ )
# print("s", s)
# print("j", j)
((lowercase_) , (lowercase_)) = s
lowercase_ = (x - 1, y)
lowercase_ = (x + 1, y)
lowercase_ = (x, y + 1)
lowercase_ = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(snake_case__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(snake_case__ )
lowercase_ = -1
lowercase_ = float('''inf''' )
if valid(snake_case__ ) and g_function[neighbours] > g_function[s] + 1:
lowercase_ = g_function[s] + 1
lowercase_ = s
if neighbours not in close_list_anchor:
open_list[0].put(snake_case__ , key(snake_case__ , 0 , snake_case__ , snake_case__ ) )
if neighbours not in close_list_inad:
for var in range(1 , snake_case__ ):
if key(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) <= Wa * key(
snake_case__ , 0 , snake_case__ , snake_case__ ):
open_list[j].put(
snake_case__ , key(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) )
def a ( ):
'''simple docstring'''
lowercase_ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
__a = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__a = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(1_0, 1),
(1_1, 1),
(1_2, 1),
(1_3, 1),
(1_4, 1),
(1_5, 1),
(1_6, 1),
(1_7, 1),
(1_8, 1),
(1_9, 1),
]
__a = make_common_ground()
__a = blocks_blk
# hyper parameters
__a = 1
__a = 1
__a = 2_0
__a = 3 # one consistent and two other inconsistent
# start and end destination
__a = (0, 0)
__a = (n - 1, n - 1)
__a = 1
def a ( snake_case__: TPos , snake_case__: TPos , snake_case__: int ):
'''simple docstring'''
lowercase_ = {start: 0, goal: float('''inf''' )}
lowercase_ = {start: -1, goal: -1}
lowercase_ = []
lowercase_ = set()
for i in range(snake_case__ ):
open_list.append(PriorityQueue() )
open_list[i].put(snake_case__ , key(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) )
lowercase_ = []
lowercase_ = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , snake_case__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(snake_case__ , snake_case__ , snake_case__ )
else:
lowercase_ , lowercase_ = open_list[i].top_show()
visited.add(snake_case__ )
expand_state(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
close_list_inad.append(snake_case__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(snake_case__ , snake_case__ , snake_case__ )
else:
lowercase_ = open_list[0].top_show()
visited.add(snake_case__ )
expand_state(
snake_case__ , 0 , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
close_list_anchor.append(snake_case__ )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(snake_case__ ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 97 |
"""simple docstring"""
from __future__ import annotations
import queue
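# Interactive binary-tree demo: build_tree() reads node values level by level from stdin,
# then each traversal helper below prints the tree in pre-, in-, post- or level-order,
# in both a recursive and an iterative (explicit stack / queue) variant.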
class TreeNode:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
def build_tree() -> TreeNode:
    print('\n********Press N to stop entering at any point of time********\n')
    check = input('Enter the value of the root node: ').strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = F'''Enter the left node of {node_found.data}: '''
        check = input(msg).strip().lower() or 'n'
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = F'''Enter the right node of {node_found.data}: '''
        check = input(msg).strip().lower() or 'n'
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=',')
    pre_order(node.left)
    pre_order(node.right)
def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=',')
    in_order(node.right)
def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=',')
def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=',')
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)
def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=',')
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=',')
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=',')
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=',')
def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return F'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
UpperCamelCase__ = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
| 110 | 0 |
'''simple docstring'''
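# Simplified DES (S-DES), the teaching variant of DES: a 10-bit key is expanded into two
# 8-bit subkeys via P10, left shifts and P8; an 8-bit block then goes through IP, two
# Feistel rounds built from function() below with a half-swap in between, and IP inverse.
# Decryption runs the same rounds with the subkeys in the opposite order. When run as a
# script it prompts for the key and message as bit strings (e.g. a 10-bit key such as
# "1010000010" and an 8-bit message such as "11010111").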
def apply_table(inp, table):
    # permute/select bits of `inp` according to the 1-based positions in `table`
    res = ''
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    # circular left shift by one position
    return data[1:] + data[0]


def xor(a, b):
    # bitwise xor of two equal-length bit strings
    res = ''
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    # outer bits select the row, inner bits the column of the 4x4 S-box
    row = int('0b' + data[0] + data[-1], 2)
    col = int('0b' + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    # one Feistel round: expand/permute the right half, xor with the round key,
    # substitute through S0/S1, permute with P4 and xor the result into the left half
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = '0' * (2 - len(l)) + l  # noqa: E741
    r = '0' * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
    key = input('Enter 10 bit key: ')
    message = input('Enter 8 bit message: ')

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print('Cipher text is:', CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print('Plain text after decrypting is:', PT)
| 44 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( __a , unittest.TestCase ):
_UpperCAmelCase = CodeGenTokenizer
_UpperCAmelCase = CodeGenTokenizerFast
_UpperCAmelCase = True
_UpperCAmelCase = {'''add_prefix_space''': True}
_UpperCAmelCase = False
def UpperCamelCase ( self ) -> Tuple:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
snake_case = dict(zip(A__ , range(len(A__ ) ) ) )
snake_case = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
snake_case = {'''unk_token''': '''<unk>'''}
snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(A__ ) )
def UpperCamelCase ( self , **A__ ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **A__ )
def UpperCamelCase ( self , **A__ ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **A__ )
def UpperCamelCase ( self , A__ ) -> Tuple:
snake_case = '''lower newer'''
snake_case = '''lower newer'''
return input_text, output_text
def UpperCamelCase ( self ) -> List[Any]:
snake_case = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case = '''lower newer'''
snake_case = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
snake_case = tokenizer.tokenize(A__ , add_prefix_space=A__ )
self.assertListEqual(A__ , A__ )
snake_case = tokens + [tokenizer.unk_token]
snake_case = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , A__ )
def UpperCamelCase ( self ) -> Optional[int]:
if not self.test_rust_tokenizer:
return
snake_case = self.get_tokenizer()
snake_case = self.get_rust_tokenizer(add_prefix_space=A__ )
snake_case = '''lower newer'''
# Testing tokenization
snake_case = tokenizer.tokenize(A__ , add_prefix_space=A__ )
snake_case = rust_tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
# Testing conversion to ids without special tokens
snake_case = tokenizer.encode(A__ , add_special_tokens=A__ , add_prefix_space=A__ )
snake_case = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
# Testing conversion to ids with special tokens
snake_case = self.get_rust_tokenizer(add_prefix_space=A__ )
snake_case = tokenizer.encode(A__ , add_prefix_space=A__ )
snake_case = rust_tokenizer.encode(A__ )
self.assertListEqual(A__ , A__ )
# Testing the unknown token
snake_case = tokens + [rust_tokenizer.unk_token]
snake_case = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A__ ) , A__ )
    def UpperCamelCase ( self , *args , **kwargs ) -> List[str]:
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def UpperCamelCase ( self , A__=15 ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
# Simple input
snake_case = '''This is a simple input'''
snake_case = ['''This is a simple input 1''', '''This is a simple input 2''']
snake_case = ('''This is a simple input''', '''This is a pair''')
snake_case = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' )
# Simple input
self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' )
# Simple input
self.assertRaises(
A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , )
# Pair input
self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' )
# Pair input
self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' )
# Pair input
self.assertRaises(
A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , )
def UpperCamelCase ( self ) -> Tuple:
snake_case = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
snake_case = '''This is a simple input'''
snake_case = ['''This is a simple input looooooooong''', '''This is a simple input''']
snake_case = ('''This is a simple input''', '''This is a pair''')
snake_case = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
snake_case = tokenizer.pad_token_id
snake_case = tokenizer(A__ , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
snake_case = tokenizer(A__ , padding=A__ , truncate=A__ , return_tensors='''np''' )
snake_case = tokenizer(*A__ , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
snake_case = tokenizer(A__ , padding=A__ , truncate=A__ , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def UpperCamelCase ( self ) -> str:
snake_case = '''$$$'''
snake_case = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=A__ , add_bos_token=A__ )
snake_case = '''This is a simple input'''
snake_case = ['''This is a simple input 1''', '''This is a simple input 2''']
snake_case = tokenizer.bos_token_id
snake_case = tokenizer(A__ )
snake_case = tokenizer(A__ )
self.assertEqual(out_s.input_ids[0] , A__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
snake_case = tokenizer.decode(out_s.input_ids )
snake_case = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , A__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def UpperCamelCase ( self ) -> Any:
snake_case = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' )
snake_case = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'''
snake_case = '''\nif len_a > len_b: result = a\nelse: result = b'''
snake_case = tokenizer.encode(A__ )
snake_case = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n''']
snake_case = tokenizer.decode(A__ , truncate_before_pattern=A__ )
self.assertEqual(A__ , A__ )
def UpperCamelCase ( self ) -> Union[str, Any]:
pass
| 44 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase ( __a , unittest.TestCase ):
__A : Dict = MgpstrTokenizer
__A : Optional[Any] = False
__A : Dict = {}
__A : Union[str, Any] = False
def UpperCAmelCase_ ( self ):
super().setUp()
# fmt: off
lowerCAmelCase_ = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
lowerCAmelCase_ = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
def UpperCAmelCase_ ( self , **_lowerCamelCase ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase ):
lowerCAmelCase_ = '''tester'''
lowerCAmelCase_ = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowerCAmelCase_ = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
lowerCAmelCase_ = tokenizer.encode([special_token] , add_special_tokens=_lowerCamelCase )
self.assertEqual(len(_lowerCamelCase ) , 1 )
lowerCAmelCase_ = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
self.assertTrue(special_token not in decoded )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowerCAmelCase_ ,lowerCAmelCase_ = self.get_input_output_texts(_lowerCamelCase )
lowerCAmelCase_ = tokenizer.tokenize(_lowerCamelCase )
lowerCAmelCase_ = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
lowerCAmelCase_ = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(_lowerCamelCase )
self.assertNotEqual(len(_lowerCamelCase ) , 0 )
lowerCAmelCase_ = tokenizer.decode(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , _lowerCamelCase )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def UpperCAmelCase_ ( self ):
pass
| 274 | '''simple docstring'''
import colorsys
from PIL import Image # type: ignore
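# Escape-time Mandelbrot renderer: get_distance() iterates z -> z**2 + c (written out on the
# real and imaginary parts) and returns the normalized step count in [0, 1]; each pixel is
# then colored either black/white (inside vs. outside the set) or by mapping that distance
# to an HSV hue.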
def snake_case_ ( __snake_case : float , __snake_case : float , __snake_case : int) -> float:
lowerCAmelCase_ = x
lowerCAmelCase_ = y
for step in range(__snake_case): # noqa: B007
lowerCAmelCase_ = a * a - b * b + x
lowerCAmelCase_ = 2 * a * b + y
lowerCAmelCase_ = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def snake_case_ ( __snake_case : float) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def snake_case_ ( __snake_case : float) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(__snake_case , 1 , 1))
def snake_case_ ( __snake_case : int = 800 , __snake_case : int = 600 , __snake_case : float = -0.6 , __snake_case : float = 0 , __snake_case : float = 3.2 , __snake_case : int = 50 , __snake_case : bool = True , ) -> Image.Image:
lowerCAmelCase_ = Image.new('''RGB''' , (image_width, image_height))
lowerCAmelCase_ = img.load()
# loop through the image-coordinates
for image_x in range(__snake_case):
for image_y in range(__snake_case):
# determine the figure-coordinates based on the image-coordinates
lowerCAmelCase_ = figure_width / image_width * image_height
lowerCAmelCase_ = figure_center_x + (image_x / image_width - 0.5) * figure_width
lowerCAmelCase_ = figure_center_y + (image_y / image_height - 0.5) * figure_height
lowerCAmelCase_ = get_distance(__snake_case , __snake_case , __snake_case)
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
lowerCAmelCase_ = get_color_coded_rgb(__snake_case)
else:
lowerCAmelCase_ = get_black_and_white_rgb(__snake_case)
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
A_ : List[str] =get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 274 | 1 |
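# Repunit divisibility (this matches Project Euler problem 129): R(k) = (10**k - 1) // 9 is
# the number written as k ones, and A(n) is the least k such that n divides R(k), defined
# whenever gcd(n, 10) == 1. The loop below tracks R(k) modulo the divisor, so k is found
# without ever constructing the huge repunit itself. For example, A(7) == 6 because
# 111111 == 7 * 15873.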
"""simple docstring"""
def least_divisible_repunit(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution(limit: int = 1_000_000) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 706 | """simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Dict = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def lowercase ( self : Optional[int] , A_ : List[str]=0 ) -> int:
__snake_case = floats_tensor((1, 3, 128, 128) , rng=random.Random(A_ ) )
__snake_case = np.random.RandomState(A_ )
__snake_case = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.75,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowercase ( self : Optional[Any] ) -> List[Any]:
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=A_ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**A_ ).images
__snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def lowercase ( self : Tuple ) -> Optional[int]:
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=A_ )
pipe.set_progress_bar_config(disable=A_ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**A_ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowercase ( self : Optional[int] ) -> str:
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
# warmup pass to apply optimizations
__snake_case = pipe(**self.get_dummy_inputs() )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**A_ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowercase ( self : str ) -> List[str]:
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**A_ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowercase ( self : Optional[int] ) -> Union[str, Any]:
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**A_ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowercase ( self : List[str] ) -> Any:
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**A_ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _A ( unittest.TestCase ):
"""simple docstring"""
@property
def lowercase ( self : str ) -> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase ( self : Dict ) -> Union[str, Any]:
__snake_case = ort.SessionOptions()
__snake_case = False
return options
def lowercase ( self : Optional[Any] ) -> Union[str, Any]:
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__snake_case = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=A_ , feature_extractor=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
__snake_case = '''A fantasy landscape, trending on artstation'''
__snake_case = np.random.RandomState(0 )
__snake_case = pipe(
prompt=A_ , image=A_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=A_ , output_type='''np''' , )
__snake_case = output.images
__snake_case = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__snake_case = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def lowercase ( self : str ) -> Optional[int]:
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__snake_case = init_image.resize((768, 512) )
__snake_case = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=A_ , safety_checker=A_ , feature_extractor=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
__snake_case = '''A fantasy landscape, trending on artstation'''
__snake_case = np.random.RandomState(0 )
__snake_case = pipe(
prompt=A_ , image=A_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=A_ , output_type='''np''' , )
__snake_case = output.images
__snake_case = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__snake_case = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 | 93 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["DeiTFeatureExtractor"]
lowercase_ = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
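# The try/except blocks above only register extra entries in _import_structure when the
# optional backends (vision, torch, TensorFlow) are importable; the _LazyModule at the
# bottom of the file then defers the real imports until an attribute is first accessed,
# while the TYPE_CHECKING branch below gives static type checkers the eager imports.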
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 51 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
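# Quine-McCluskey minimization of a boolean function: the minterms are written in binary,
# repeatedly merged whenever two terms differ in exactly one bit (the differing bit becomes
# '_'), the terms that survive are the prime implicants, and a prime-implicant chart is then
# used to pick out the essential ones.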
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = '_'
    if count > 1:
        return False
    else:
        return "".join(list1)
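# For instance, compare_string("0010", "0110") merges the two minterms into "0_10", while
# strings differing in more than one position cannot be merged and the call returns False.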
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> list[str]:
UpperCAmelCase_ : Tuple = []
while True:
UpperCAmelCase_ : Union[str, Any] = ['$'] * len(UpperCamelCase )
UpperCAmelCase_ : Optional[int] = []
for i in range(len(UpperCamelCase ) ):
for j in range(i + 1 ,len(UpperCamelCase ) ):
UpperCAmelCase_ : Optional[Any] = compare_string(binary[i] ,binary[j] )
if k is False:
UpperCAmelCase_ : Dict = '*'
UpperCAmelCase_ : List[Any] = '*'
temp.append('X' )
for i in range(len(UpperCamelCase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(UpperCamelCase ) == 0:
return pi
UpperCAmelCase_ : Optional[Any] = list(set(UpperCamelCase ) )
def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase ) -> list[str]:
UpperCAmelCase_ : Any = []
for minterm in minterms:
UpperCAmelCase_ : List[Any] = ''
for _ in range(UpperCamelCase ):
UpperCAmelCase_ : List[Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(UpperCamelCase )
return temp
def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> bool:
UpperCAmelCase_ : Tuple = list(UpperCamelCase )
UpperCAmelCase_ : List[Any] = list(UpperCamelCase )
UpperCAmelCase_ : Dict = 0
for i in range(len(UpperCamelCase ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase ) -> list[str]:
UpperCAmelCase_ : int = []
UpperCAmelCase_ : Optional[int] = [0] * len(UpperCamelCase )
for i in range(len(chart[0] ) ):
UpperCAmelCase_ : Any = 0
UpperCAmelCase_ : Optional[Any] = -1
for j in range(len(UpperCamelCase ) ):
if chart[j][i] == 1:
count += 1
UpperCAmelCase_ : Union[str, Any] = j
if count == 1:
UpperCAmelCase_ : Optional[int] = 1
for i in range(len(UpperCamelCase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(UpperCamelCase ) ):
UpperCAmelCase_ : Optional[Any] = 0
temp.append(prime_implicants[i] )
while True:
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : Dict = -1
UpperCAmelCase_ : Union[str, Any] = 0
for i in range(len(UpperCamelCase ) ):
UpperCAmelCase_ : List[str] = chart[i].count(1 )
if count_n > max_n:
UpperCAmelCase_ : Any = count_n
UpperCAmelCase_ : str = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(UpperCamelCase ) ):
UpperCAmelCase_ : Optional[Any] = 0
def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase ) -> list[list[int]]:
UpperCAmelCase_ : str = [[0 for x in range(len(UpperCamelCase ) )] for x in range(len(UpperCamelCase ) )]
for i in range(len(UpperCamelCase ) ):
UpperCAmelCase_ : Optional[Any] = prime_implicants[i].count('_' )
for j in range(len(UpperCamelCase ) ):
if is_for_table(prime_implicants[i] ,binary[j] ,UpperCamelCase ):
UpperCAmelCase_ : List[str] = 1
return chart
def SCREAMING_SNAKE_CASE( ) -> None:
UpperCAmelCase_ : Dict = int(input('Enter the no. of variables\n' ) )
UpperCAmelCase_ : Tuple = [
float(UpperCamelCase )
for x in input(
'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
]
UpperCAmelCase_ : Tuple = decimal_to_binary(UpperCamelCase ,UpperCamelCase )
UpperCAmelCase_ : List[Any] = check(UpperCamelCase )
print('Prime Implicants are:' )
print(UpperCamelCase )
UpperCAmelCase_ : Any = prime_implicant_chart(UpperCamelCase ,UpperCamelCase )
UpperCAmelCase_ : List[str] = selection(UpperCamelCase ,UpperCamelCase )
print('Essential Prime Implicants are:' )
print(UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 471 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowerCAmelCase__ = logging.get_logger(__name__)
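# Fill-mask pipeline: preprocessing tokenizes the prompt and verifies that the mask token is
# present, the forward step runs the model, and postprocessing softmaxes the logits at the
# masked position and returns the top_k candidate tokens (optionally restricted to the
# user-supplied `targets`, which are first resolved to vocabulary ids).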
@add_end_docstrings(
a_, r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ", )
class lowercase ( a_ ):
def _snake_case ( self , _snake_case) -> np.ndarray:
if self.framework == "tf":
UpperCAmelCase_ : Dict = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
elif self.framework == "pt":
UpperCAmelCase_ : Any = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_snake_case)
else:
raise ValueError('Unsupported framework')
return masked_index
def _snake_case ( self , _snake_case) -> np.ndarray:
UpperCAmelCase_ : Optional[int] = self.get_masked_index(_snake_case)
UpperCAmelCase_ : int = np.prod(masked_index.shape)
if numel < 1:
raise PipelineException(
'fill-mask' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def _snake_case ( self , _snake_case) -> int:
if isinstance(_snake_case , _snake_case):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['input_ids'][0])
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_snake_case)
def _snake_case ( self , _snake_case , _snake_case=None , **_snake_case) -> Dict[str, GenericTensor]:
if return_tensors is None:
UpperCAmelCase_ : Optional[Any] = self.framework
UpperCAmelCase_ : str = self.tokenizer(_snake_case , return_tensors=_snake_case)
self.ensure_exactly_one_mask_token(_snake_case)
return model_inputs
def _snake_case ( self , _snake_case) -> Optional[int]:
UpperCAmelCase_ : List[str] = self.model(**_snake_case)
UpperCAmelCase_ : Optional[Any] = model_inputs['input_ids']
return model_outputs
def _snake_case ( self , _snake_case , _snake_case=5 , _snake_case=None) -> str:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
UpperCAmelCase_ : Optional[int] = target_ids.shape[0]
UpperCAmelCase_ : Union[str, Any] = model_outputs['input_ids'][0]
UpperCAmelCase_ : Optional[Any] = model_outputs['logits']
if self.framework == "tf":
UpperCAmelCase_ : Optional[int] = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
UpperCAmelCase_ : Tuple = outputs.numpy()
UpperCAmelCase_ : Dict = outputs[0, masked_index, :]
UpperCAmelCase_ : List[str] = stable_softmax(_snake_case , axis=-1)
if target_ids is not None:
UpperCAmelCase_ : str = tf.gather_nd(tf.squeeze(_snake_case , 0) , target_ids.reshape(-1 , 1))
UpperCAmelCase_ : str = tf.expand_dims(_snake_case , 0)
UpperCAmelCase_ : int = tf.math.top_k(_snake_case , k=_snake_case)
UpperCAmelCase_ , UpperCAmelCase_ : Dict = topk.values.numpy(), topk.indices.numpy()
else:
UpperCAmelCase_ : List[str] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_snake_case).squeeze(-1)
# Fill mask pipeline supports only one ${mask_token} per sample
UpperCAmelCase_ : int = outputs[0, masked_index, :]
UpperCAmelCase_ : str = logits.softmax(dim=-1)
if target_ids is not None:
UpperCAmelCase_ : List[str] = probs[..., target_ids]
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = probs.topk(_snake_case)
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : str = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist())):
UpperCAmelCase_ : Union[str, Any] = []
for v, p in zip(_values , _predictions):
# Copy is important since we're going to modify this array in place
UpperCAmelCase_ : Union[str, Any] = input_ids.numpy().copy()
if target_ids is not None:
UpperCAmelCase_ : str = target_ids[p].tolist()
UpperCAmelCase_ : Union[str, Any] = p
# Filter padding out:
UpperCAmelCase_ : List[str] = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
UpperCAmelCase_ : Union[str, Any] = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case)
UpperCAmelCase_ : Tuple = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p]), 'sequence': sequence}
row.append(_snake_case)
result.append(_snake_case)
if single_mask:
return result[0]
return result
def _snake_case ( self , _snake_case , _snake_case=None) -> List[str]:
if isinstance(_snake_case , _snake_case):
UpperCAmelCase_ : List[str] = [targets]
try:
UpperCAmelCase_ : Optional[int] = self.tokenizer.get_vocab()
except Exception:
UpperCAmelCase_ : Union[str, Any] = {}
UpperCAmelCase_ : List[Any] = []
for target in targets:
UpperCAmelCase_ : Optional[int] = vocab.get(_snake_case , _snake_case)
if id_ is None:
UpperCAmelCase_ : int = self.tokenizer(
_snake_case , add_special_tokens=_snake_case , return_attention_mask=_snake_case , return_token_type_ids=_snake_case , max_length=1 , truncation=_snake_case , )['input_ids']
if len(_snake_case) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
'We cannot replace it with anything meaningful, ignoring it')
continue
UpperCAmelCase_ : Tuple = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`.""")
target_ids.append(id_)
UpperCAmelCase_ : Union[str, Any] = list(set(_snake_case))
if len(_snake_case) == 0:
raise ValueError('At least one target must be provided when passed.')
UpperCAmelCase_ : Dict = np.array(_snake_case)
return target_ids
def _snake_case ( self , _snake_case=None , _snake_case=None) -> Dict:
UpperCAmelCase_ : str = {}
if targets is not None:
UpperCAmelCase_ : Dict = self.get_target_ids(_snake_case , _snake_case)
UpperCAmelCase_ : Optional[int] = target_ids
if top_k is not None:
UpperCAmelCase_ : List[str] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'fill-mask' , self.model.base_model_prefix , 'The tokenizer does not define a `mask_token`.')
return {}, {}, postprocess_params
def __call__( self , _snake_case , *_snake_case , **_snake_case) -> Optional[int]:
UpperCAmelCase_ : Any = super().__call__(_snake_case , **_snake_case)
if isinstance(_snake_case , _snake_case) and len(_snake_case) == 1:
return outputs[0]
return outputs
| 471 | 1 |
import os
from datetime import datetime as dt
from github import Github
snake_case : str = [
'good first issue',
'feature request',
'wip',
]
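# Maintenance bot: needs a GITHUB_TOKEN environment variable and the PyGithub package
# (imported as `github`). Open issues at least 30 days old are closed when their last
# comment came from github-actions[bot] and they have been inactive for more than a week;
# otherwise, after more than 23 days of inactivity they receive the stale-warning comment.
# Issues carrying any of the labels listed above are always skipped.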
def SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = Github(os.environ['GITHUB_TOKEN'] )
_SCREAMING_SNAKE_CASE = g.get_repo('huggingface/accelerate' )
_SCREAMING_SNAKE_CASE = repo.get_issues(state='open' )
for issue in open_issues:
_SCREAMING_SNAKE_CASE = sorted([comment for comment in issue.get_comments()] ,key=lambda UpperCAmelCase__ : i.created_at ,reverse=UpperCAmelCase__ )
_SCREAMING_SNAKE_CASE = comments[0] if len(UpperCAmelCase__ ) > 0 else None
_SCREAMING_SNAKE_CASE = dt.utcnow()
_SCREAMING_SNAKE_CASE = (current_time - issue.updated_at).days
_SCREAMING_SNAKE_CASE = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 605 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = [
# (model_name, model_kwargs)
("bert-base-cased", {}),
("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def __magic_name__ ( self )-> str:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(A_ , 'tf' , 12 , **A_ )
@require_torch
@slow
def __magic_name__ ( self )-> Union[str, Any]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(A_ , 'pt' , 12 , **A_ )
@require_torch
@slow
def __magic_name__ ( self )-> List[str]:
from transformers import BertModel
_SCREAMING_SNAKE_CASE = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(A_ ) )
vocab_file.flush()
_SCREAMING_SNAKE_CASE = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
_SCREAMING_SNAKE_CASE = BertModel(BertConfig(vocab_size=len(A_ ) ) )
model.save_pretrained(A_ )
self._test_export(A_ , 'pt' , 12 , A_ )
@require_tf
@slow
def __magic_name__ ( self )-> Dict:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_SCREAMING_SNAKE_CASE = self._test_export(A_ , 'tf' , 12 , **A_ )
_SCREAMING_SNAKE_CASE = quantize(Path(A_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(A_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def __magic_name__ ( self )-> List[str]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_SCREAMING_SNAKE_CASE = self._test_export(A_ , 'pt' , 12 , **A_ )
_SCREAMING_SNAKE_CASE = quantize(A_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(A_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def __magic_name__ ( self , A_ , A_ , A_ , A_=None , **A_ )-> Any:
try:
# Compute path
with TemporaryDirectory() as tempdir:
_SCREAMING_SNAKE_CASE = Path(A_ ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(A_ , A_ , A_ , A_ , A_ , **A_ )
return path
except Exception as e:
self.fail(A_ )
@require_torch
@require_tokenizers
@slow
def __magic_name__ ( self )-> List[str]:
from transformers import BertModel
_SCREAMING_SNAKE_CASE = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
_SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(A_ , A_ , 'pt' )
@require_tf
@require_tokenizers
@slow
def __magic_name__ ( self )-> Optional[int]:
from transformers import TFBertModel
_SCREAMING_SNAKE_CASE = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
_SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(A_ , A_ , 'tf' )
def __magic_name__ ( self , A_ , A_ , A_ )-> List[str]:
_SCREAMING_SNAKE_CASE = FeatureExtractionPipeline(A_ , A_ )
_SCREAMING_SNAKE_CASE = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = infer_shapes(A_ , A_ )
# Assert all variables are present
self.assertEqual(len(A_ ) , len(A_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , A_ )
self.assertSequenceEqual(variable_names[3:] , A_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] , {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] , {0: 'batch'} )
def __magic_name__ ( self )-> Dict:
_SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask', 'token_type_ids']
_SCREAMING_SNAKE_CASE = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = ensure_valid_input(FuncContiguousArgs() , A_ , A_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(A_ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(A_ ) , set(A_ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(A_ , (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
        # Generated args are interleaved with other args (for instance the "past" parameter in GPT2)
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = ensure_valid_input(FuncNonContiguousArgs() , A_ , A_ )
        # Should have exactly one arg (all args before the one not provided, "some_other_args")
self.assertEqual(len(A_ ) , 1 )
self.assertEqual(len(A_ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] , 'input_ids' )
def __magic_name__ ( self )-> Optional[Any]:
_SCREAMING_SNAKE_CASE = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) , '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx' , generated.as_posix() )
| 605 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 81 |
import argparse
import json
from tqdm import tqdm
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--src_path" , type=lowerCamelCase__ , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
parser.add_argument(
"--evaluation_set" , type=lowerCamelCase__ , help="where to store parsed evaluation_set file" , )
parser.add_argument(
"--gold_data_path" , type=lowerCamelCase__ , help="where to store parsed gold_data_path file" , )
lowercase__ : Dict = parser.parse_args()
with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
args.gold_data_path , "w" ) as gold_file:
lowercase__ : List[str] = json.load(lowerCamelCase__ )
for dpr_record in tqdm(lowerCamelCase__ ):
lowercase__ : Any = dpr_record["question"]
lowercase__ : str = [context["title"] for context in dpr_record["positive_ctxs"]]
eval_file.write(question + "\n" )
gold_file.write("\t".join(lowerCamelCase__ ) + "\n" )
if __name__ == "__main__":
main()
| 81 | 1 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__A : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase ( _UpperCAmelCase ):
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
super().__init__()
self.register_modules(
vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , )
def a_ ( self , SCREAMING_SNAKE_CASE_ = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = 1
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[str] = len(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE_ )}' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(SCREAMING_SNAKE_CASE_ )}.' )
# get prompt text embeddings
UpperCamelCase : List[Any] = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
UpperCamelCase : Tuple = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase : List[str] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
UpperCamelCase : Union[str, Any] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
UpperCamelCase : Optional[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[int] = text_embeddings.shape
UpperCamelCase : Tuple = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE_ , 1 )
UpperCamelCase : Optional[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase : Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase : List[str]
if negative_prompt is None:
UpperCamelCase : Any = [""""""]
elif type(SCREAMING_SNAKE_CASE_ ) is not type(SCREAMING_SNAKE_CASE_ ):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE_ )} !='
f' {type(SCREAMING_SNAKE_CASE_ )}.' )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[int] = [negative_prompt]
elif batch_size != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE_ )}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
""" the batch size of `prompt`.""" )
else:
UpperCamelCase : List[Any] = negative_prompt
UpperCamelCase : int = text_input_ids.shape[-1]
UpperCamelCase : Tuple = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , )
UpperCamelCase : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase : Dict = uncond_embeddings.shape[1]
UpperCamelCase : Union[str, Any] = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
UpperCamelCase : Union[str, Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase : Tuple = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase : Optional[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase : List[str] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
UpperCamelCase : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCamelCase : Any = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device="""cpu""" , dtype=SCREAMING_SNAKE_CASE_ ).to(self.device )
UpperCamelCase : str = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device="""cpu""" , dtype=SCREAMING_SNAKE_CASE_ ).to(
self.device )
else:
UpperCamelCase : Union[str, Any] = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
UpperCamelCase : Union[str, Any] = latents_reference.to(self.device )
UpperCamelCase : List[str] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
UpperCamelCase : Optional[int] = (latents_shape[3] - latents_shape_reference[3]) // 2
UpperCamelCase : Any = (latents_shape[2] - latents_shape_reference[2]) // 2
UpperCamelCase : int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
UpperCamelCase : List[Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
UpperCamelCase : str = 0 if dx < 0 else dx
UpperCamelCase : Optional[int] = 0 if dy < 0 else dy
UpperCamelCase : Optional[int] = max(-dx , 0 )
UpperCamelCase : Any = max(-dy , 0 )
UpperCamelCase : Dict = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCamelCase : str = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase : Tuple = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase : Union[str, Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase : Union[str, Any] = {}
if accepts_eta:
UpperCamelCase : Any = eta
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase : int = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# predict the noise residual
UpperCamelCase : Optional[int] = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
# perform guidance
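            # Classifier-free guidance combines the two noise predictions:
            #   noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # so values of guidance_scale above 1 push the sample towards the text-conditioned prediction.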
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase : Tuple = noise_pred.chunk(2 )
UpperCamelCase : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase : Any = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
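        # 1 / 0.18215 undoes the Stable Diffusion VAE latent scaling factor before decoding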
UpperCamelCase : int = 1 / 0.18215 * latents
UpperCamelCase : List[str] = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
UpperCamelCase : int = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
UpperCamelCase : str = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) , return_tensors="""pt""" ).to(
self.device )
UpperCamelCase , UpperCamelCase : Optional[Any] = self.safety_checker(
images=SCREAMING_SNAKE_CASE_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
UpperCamelCase : Any = None
if output_type == "pil":
UpperCamelCase : str = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
| 499 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Tuple = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__A : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 499 | 1 |
'''simple docstring'''
import math
def lowercase_ ( _lowercase ) -> list[int]:
'''simple docstring'''
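    # Segmented sieve of Eratosthenes: first sieve every prime up to sqrt(n) in memory,
    # then walk the range (sqrt(n), n] in blocks of roughly sqrt(n) numbers, crossing out
    # multiples of those base primes inside each block. This keeps the working array small
    # while still enumerating every prime up to n.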
lowerCamelCase_ : Optional[Any] = []
lowerCamelCase_ : List[str] = 2
lowerCamelCase_ : List[Any] = int(math.sqrt(_lowercase ) ) # Size of every segment
lowerCamelCase_ : Dict = [True] * (end + 1)
lowerCamelCase_ : Optional[Any] = []
while start <= end:
if temp[start] is True:
in_prime.append(_lowercase )
for i in range(start * start , end + 1 , _lowercase ):
lowerCamelCase_ : Union[str, Any] = False
start += 1
prime += in_prime
lowerCamelCase_ : List[Any] = end + 1
lowerCamelCase_ : Tuple = min(2 * end , _lowercase )
while low <= n:
lowerCamelCase_ : Tuple = [True] * (high - low + 1)
for each in in_prime:
lowerCamelCase_ : Any = math.floor(low / each ) * each
if t < low:
t += each
for j in range(_lowercase , high + 1 , _lowercase ):
lowerCamelCase_ : List[str] = False
for j in range(len(_lowercase ) ):
if temp[j] is True:
prime.append(j + low )
lowerCamelCase_ : Dict = high + 1
lowerCamelCase_ : List[Any] = min(high + end , _lowercase )
return prime
print(sieve(10**6))
| 703 |
'''simple docstring'''
def lowercase_ ( _lowercase = 1_000 ) -> int:
'''simple docstring'''
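    # e.g. for power=15: 2**15 = 32768 and the digit sum is 3 + 2 + 7 + 6 + 8 = 26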
lowerCamelCase_ : Dict = 2**power
lowerCamelCase_ : Union[str, Any] = str(_lowercase )
lowerCamelCase_ : Union[str, Any] = list(_lowercase )
lowerCamelCase_ : Dict = 0
for i in list_num:
sum_of_num += int(_lowercase )
return sum_of_num
if __name__ == "__main__":
__lowercase : Optional[Any] = int(input('''Enter the power of 2: ''').strip())
print('''2 ^ ''', power, ''' = ''', 2**power)
__lowercase : Tuple = solution(power)
print('''Sum of the digits is: ''', result)
| 357 | 0 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 382 |
from math import asin, atan, cos, radians, sin, sqrt, tan
_snake_case = 6_3_7_8_1_3_7.0
_snake_case = 6_3_5_6_7_5_2.3_1_4_2_4_5
_snake_case = 6378137
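# Haversine great-circle distance on a sphere of radius RADIUS, computed on "reduced"
# (parametric) latitudes so the flattening implied by AXIS_A/AXIS_B is partially accounted for:
#   d = 2 * R * asin(sqrt(sin^2(d_phi / 2) + cos(phi_1) * cos(phi_2) * sin^2(d_lambda / 2)))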
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__ ) -> float:
__UpperCAmelCase : Any = (AXIS_A - AXIS_B) / AXIS_A
__UpperCAmelCase : Tuple = atan((1 - flattening) * tan(radians(snake_case__ ) ) )
__UpperCAmelCase : Optional[Any] = atan((1 - flattening) * tan(radians(snake_case__ ) ) )
__UpperCAmelCase : List[Any] = radians(snake_case__ )
__UpperCAmelCase : Dict = radians(snake_case__ )
# Equation
__UpperCAmelCase : Tuple = sin((phi_a - phi_a) / 2 )
__UpperCAmelCase : Dict = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
__UpperCAmelCase : Union[str, Any] = sqrt(sin_sq_phi + (cos(snake_case__ ) * cos(snake_case__ ) * sin_sq_lambda) )
return 2 * RADIUS * asin(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 382 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ = {
"configuration_longformer": [
"LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LongformerConfig",
"LongformerOnnxConfig",
],
"tokenization_longformer": ["LongformerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongformerForMaskedLM",
"LongformerForMultipleChoice",
"LongformerForQuestionAnswering",
"LongformerForSequenceClassification",
"LongformerForTokenClassification",
"LongformerModel",
"LongformerPreTrainedModel",
"LongformerSelfAttention",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLongformerForMaskedLM",
"TFLongformerForMultipleChoice",
"TFLongformerForQuestionAnswering",
"TFLongformerForSequenceClassification",
"TFLongformerForTokenClassification",
"TFLongformerModel",
"TFLongformerPreTrainedModel",
"TFLongformerSelfAttention",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 716 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
UpperCAmelCase_ = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
UpperCAmelCase_ = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
UpperCAmelCase_ = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] )->Optional[Any]:
def remove_articles(_SCREAMING_SNAKE_CASE : List[str] ):
_lowerCAmelCase = re.compile(r'''\b(a|an|the)\b''' , re.UNICODE )
return re.sub(_SCREAMING_SNAKE_CASE , ''' ''' , _SCREAMING_SNAKE_CASE )
def white_space_fix(_SCREAMING_SNAKE_CASE : List[Any] ):
return " ".join(text.split() )
def remove_punc(_SCREAMING_SNAKE_CASE : Optional[Any] ):
_lowerCAmelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_SCREAMING_SNAKE_CASE : Optional[int] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_SCREAMING_SNAKE_CASE ) ) ) )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] )->Any:
return int(normalize_answer(_SCREAMING_SNAKE_CASE ) == normalize_answer(_SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : str )->int:
_lowerCAmelCase = [any(compute_exact(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for ref in refs ) for pred, refs in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
return (sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE )) * 1_0_0
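# SARI scores a simplification against the source sentence and the reference simplifications
# by separately rewarding n-grams that are correctly kept, correctly deleted and correctly
# added, then averaging those sub-scores. The helper below computes the (keep, delete, add)
# scores for a single n-gram order; the caller averages them over 1- to 4-grams.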
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] )->Optional[int]:
_lowerCAmelCase = [rgram for rgrams in rgramslist for rgram in rgrams]
_lowerCAmelCase = Counter(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = Counter(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = Counter()
for sgram, scount in sgramcounter.items():
_lowerCAmelCase = scount * numref
_lowerCAmelCase = Counter(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = Counter()
for cgram, ccount in cgramcounter.items():
_lowerCAmelCase = ccount * numref
# KEEP
_lowerCAmelCase = sgramcounter_rep & cgramcounter_rep
_lowerCAmelCase = keepgramcounter_rep & rgramcounter
_lowerCAmelCase = sgramcounter_rep & rgramcounter
_lowerCAmelCase = 0
_lowerCAmelCase = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCAmelCase = 1
_lowerCAmelCase = 1
if len(_SCREAMING_SNAKE_CASE ) > 0:
_lowerCAmelCase = keeptmpscorea / len(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_lowerCAmelCase = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_lowerCAmelCase = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_lowerCAmelCase = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_lowerCAmelCase = sgramcounter_rep - cgramcounter_rep
_lowerCAmelCase = delgramcounter_rep - rgramcounter
_lowerCAmelCase = sgramcounter_rep - rgramcounter
_lowerCAmelCase = 0
_lowerCAmelCase = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCAmelCase = 1
if len(_SCREAMING_SNAKE_CASE ) > 0:
_lowerCAmelCase = deltmpscorea / len(_SCREAMING_SNAKE_CASE )
# ADDITION
_lowerCAmelCase = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = set(_SCREAMING_SNAKE_CASE ) & set(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCAmelCase = 1
_lowerCAmelCase = 1
if len(_SCREAMING_SNAKE_CASE ) > 0:
_lowerCAmelCase = addtmpscore / len(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
_lowerCAmelCase = addtmpscore / len(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = 0
if addscore_precision > 0 or addscore_recall > 0:
_lowerCAmelCase = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str )->List[Any]:
_lowerCAmelCase = len(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = ssent.split(''' ''' )
_lowerCAmelCase = csent.split(''' ''' )
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
for rsent in rsents:
_lowerCAmelCase = rsent.split(''' ''' )
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
ragramslist.append(_SCREAMING_SNAKE_CASE )
for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ):
if i < len(_SCREAMING_SNAKE_CASE ) - 1:
_lowerCAmelCase = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(_SCREAMING_SNAKE_CASE )
if i < len(_SCREAMING_SNAKE_CASE ) - 2:
_lowerCAmelCase = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(_SCREAMING_SNAKE_CASE )
if i < len(_SCREAMING_SNAKE_CASE ) - 3:
_lowerCAmelCase = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(_SCREAMING_SNAKE_CASE )
ragramslist.append(_SCREAMING_SNAKE_CASE )
ragramslist.append(_SCREAMING_SNAKE_CASE )
ragramslist.append(_SCREAMING_SNAKE_CASE )
for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ):
if i < len(_SCREAMING_SNAKE_CASE ) - 1:
_lowerCAmelCase = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(_SCREAMING_SNAKE_CASE )
if i < len(_SCREAMING_SNAKE_CASE ) - 2:
_lowerCAmelCase = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(_SCREAMING_SNAKE_CASE )
if i < len(_SCREAMING_SNAKE_CASE ) - 3:
_lowerCAmelCase = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(_SCREAMING_SNAKE_CASE )
for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ):
if i < len(_SCREAMING_SNAKE_CASE ) - 1:
_lowerCAmelCase = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(_SCREAMING_SNAKE_CASE )
if i < len(_SCREAMING_SNAKE_CASE ) - 2:
_lowerCAmelCase = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(_SCREAMING_SNAKE_CASE )
if i < len(_SCREAMING_SNAKE_CASE ) - 3:
_lowerCAmelCase = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(_SCREAMING_SNAKE_CASE )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_lowerCAmelCase = sum([delascore, delascore, delascore, delascore] ) / 4
_lowerCAmelCase = sum([addascore, addascore, addascore, addascore] ) / 4
_lowerCAmelCase = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : str = "13a" , _SCREAMING_SNAKE_CASE : bool = True )->int:
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though the Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
_lowerCAmelCase = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
_lowerCAmelCase = sacrebleu.metrics.bleu._get_tokenizer(_SCREAMING_SNAKE_CASE )()(_SCREAMING_SNAKE_CASE )
else:
_lowerCAmelCase = sacrebleu.TOKENIZERS[tokenizer]()(_SCREAMING_SNAKE_CASE )
elif tokenizer == "moses":
_lowerCAmelCase = sacremoses.MosesTokenizer().tokenize(_SCREAMING_SNAKE_CASE , return_str=_SCREAMING_SNAKE_CASE , escape=_SCREAMING_SNAKE_CASE )
elif tokenizer == "penn":
_lowerCAmelCase = sacremoses.MosesTokenizer().penn_tokenize(_SCREAMING_SNAKE_CASE , return_str=_SCREAMING_SNAKE_CASE )
else:
_lowerCAmelCase = sentence
if not return_str:
_lowerCAmelCase = normalized_sent.split()
return normalized_sent
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str] )->str:
if not (len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
_lowerCAmelCase = 0
for src, pred, refs in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
sari_score += SARIsent(normalize(_SCREAMING_SNAKE_CASE ) , normalize(_SCREAMING_SNAKE_CASE ) , [normalize(_SCREAMING_SNAKE_CASE ) for sent in refs] )
_lowerCAmelCase = sari_score / len(_SCREAMING_SNAKE_CASE )
return 1_0_0 * sari_score
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[Any]="exp" , _SCREAMING_SNAKE_CASE : Optional[int]=None , _SCREAMING_SNAKE_CASE : Optional[int]=False , _SCREAMING_SNAKE_CASE : str=False , _SCREAMING_SNAKE_CASE : int=False , )->str:
_lowerCAmelCase = len(references[0] )
if any(len(_SCREAMING_SNAKE_CASE ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_lowerCAmelCase = [[refs[i] for refs in references] for i in range(_SCREAMING_SNAKE_CASE )]
_lowerCAmelCase = sacrebleu.corpus_bleu(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , smooth_method=_SCREAMING_SNAKE_CASE , smooth_value=_SCREAMING_SNAKE_CASE , force=_SCREAMING_SNAKE_CASE , lowercase=_SCREAMING_SNAKE_CASE , use_effective_order=_SCREAMING_SNAKE_CASE , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
def __lowerCAmelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = {}
result.update({'''sari''': compute_sari(sources=_lowerCAmelCase , predictions=_lowerCAmelCase , references=_lowerCAmelCase )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=_lowerCAmelCase , references=_lowerCAmelCase )} )
result.update({'''exact''': compute_em(predictions=_lowerCAmelCase , references=_lowerCAmelCase )} )
        return result
| 664 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowercase ( unittest.TestCase ):
@slow
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" ,return_dict=A ).to(A )
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained("""google/mt5-small""" )
UpperCAmelCase__ : Union[str, Any] = tokenizer("""Hello there""" ,return_tensors="""pt""" ).input_ids
UpperCAmelCase__ : Dict = tokenizer("""Hi I am""" ,return_tensors="""pt""" ).input_ids
UpperCAmelCase__ : str = model(input_ids.to(A ) ,labels=labels.to(A ) ).loss
UpperCAmelCase__ : Optional[Any] = -(labels.shape[-1] * loss.item())
UpperCAmelCase__ : Tuple = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 65 |
"""simple docstring"""
__UpperCAmelCase = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
__UpperCAmelCase = frozenset(['prompt', 'negative_prompt'])
__UpperCAmelCase = frozenset([])
__UpperCAmelCase = frozenset(['image'])
__UpperCAmelCase = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
__UpperCAmelCase = frozenset(['image'])
__UpperCAmelCase = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
__UpperCAmelCase = frozenset(['prompt', 'image', 'negative_prompt'])
__UpperCAmelCase = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
__UpperCAmelCase = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
__UpperCAmelCase = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
__UpperCAmelCase = frozenset(['image', 'mask_image'])
__UpperCAmelCase = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
__UpperCAmelCase = frozenset(['example_image', 'image', 'mask_image'])
__UpperCAmelCase = frozenset(['class_labels'])
__UpperCAmelCase = frozenset(['class_labels'])
__UpperCAmelCase = frozenset(['batch_size'])
__UpperCAmelCase = frozenset([])
__UpperCAmelCase = frozenset(['batch_size'])
__UpperCAmelCase = frozenset([])
__UpperCAmelCase = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
__UpperCAmelCase = frozenset(['prompt', 'negative_prompt'])
__UpperCAmelCase = frozenset(['input_tokens'])
__UpperCAmelCase = frozenset(['input_tokens'])
| 65 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''table-transformer'''
UpperCAmelCase__ = ['''past_key_values''']
UpperCAmelCase__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : Tuple , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Optional[Any]=3 , UpperCAmelCase__ : List[str]=100 , UpperCAmelCase__ : Tuple=6 , UpperCAmelCase__ : Optional[Any]=2_048 , UpperCAmelCase__ : Union[str, Any]=8 , UpperCAmelCase__ : Dict=6 , UpperCAmelCase__ : Tuple=2_048 , UpperCAmelCase__ : Optional[int]=8 , UpperCAmelCase__ : str=0.0 , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Union[str, Any]="relu" , UpperCAmelCase__ : str=256 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : List[str]=0.0 , UpperCAmelCase__ : Optional[Any]=0.02 , UpperCAmelCase__ : Any=1.0 , UpperCAmelCase__ : List[str]=False , UpperCAmelCase__ : str="sine" , UpperCAmelCase__ : Optional[int]="resnet50" , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : List[str]=False , UpperCAmelCase__ : Union[str, Any]=1 , UpperCAmelCase__ : Union[str, Any]=5 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : Dict=1 , UpperCAmelCase__ : Any=5 , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : Optional[Any]=0.1 , **UpperCAmelCase__ : List[Any] , ) ->List[str]:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
A__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__):
A__ = backbone_config.get('''model_type''')
A__ = CONFIG_MAPPING[backbone_model_type]
A__ = config_class.from_dict(UpperCAmelCase__)
# set timm attributes to None
A__ , A__ , A__ = None, None, None
A__ = use_timm_backbone
A__ = backbone_config
A__ = num_channels
A__ = num_queries
A__ = d_model
A__ = encoder_ffn_dim
A__ = encoder_layers
A__ = encoder_attention_heads
A__ = decoder_ffn_dim
A__ = decoder_layers
A__ = decoder_attention_heads
A__ = dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = activation_function
A__ = init_std
A__ = init_xavier_std
A__ = encoder_layerdrop
A__ = decoder_layerdrop
A__ = encoder_layers
A__ = auxiliary_loss
A__ = position_embedding_type
A__ = backbone
A__ = use_pretrained_backbone
A__ = dilation
# Hungarian matcher
A__ = class_cost
A__ = bbox_cost
A__ = giou_cost
# Loss coefficients
A__ = mask_loss_coefficient
A__ = dice_loss_coefficient
A__ = bbox_loss_coefficient
A__ = giou_loss_coefficient
A__ = eos_coefficient
super().__init__(is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__)
@property
def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
'''simple docstring'''
return self.d_model
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE ( self : str) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
])
@property
def SCREAMING_SNAKE_CASE ( self : List[str]) ->float:
'''simple docstring'''
return 1e-5
@property
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->int:
'''simple docstring'''
return 12
| 702 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class UpperCamelCase_ :
'''simple docstring'''
UpperCAmelCase__ = XGLMConfig
UpperCAmelCase__ = {}
UpperCAmelCase__ = '''gelu'''
def __init__( self : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any]=14 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Any=99 , UpperCAmelCase__ : Union[str, Any]=32 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : List[str]=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Any=512 , UpperCAmelCase__ : List[Any]=0.02 , ) ->str:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_labels
A__ = vocab_size
A__ = d_model
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = ffn_dim
A__ = activation_function
A__ = activation_dropout
A__ = attention_dropout
A__ = max_position_embeddings
A__ = initializer_range
A__ = None
A__ = 0
A__ = 2
A__ = 1
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]:
'''simple docstring'''
return XGLMConfig.from_pretrained('''facebook/xglm-564M''')
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple:
'''simple docstring'''
A__ = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) , clip_value_min=0 , clip_value_max=3)
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length])
A__ = self.get_config()
A__ = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2)
return (
config,
input_ids,
input_mask,
head_mask,
)
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=UpperCAmelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=UpperCAmelCase__ , )
def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) = config_and_inputs
A__ = {
'''input_ids''': input_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
UpperCAmelCase__ = (TFXGLMForCausalLM,) if is_tf_available() else ()
UpperCAmelCase__ = (
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
'''simple docstring'''
A__ = TFXGLMModelTester(self)
A__ = ConfigTester(self , config_class=UpperCAmelCase__ , n_embd=37)
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->str:
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFXGLMModel.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''')
def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any]=True) ->Union[str, Any]:
'''simple docstring'''
A__ = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''')
A__ = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.intaa) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
A__ = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
# fmt: on
A__ = model.generate(UpperCAmelCase__ , do_sample=UpperCAmelCase__ , num_beams=1)
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
'''simple docstring'''
A__ = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''')
A__ = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''')
tf.random.set_seed(0)
A__ = tokenizer('''Today is a nice day and''' , return_tensors='''tf''')
A__ = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(''':/CPU:0'''):
A__ = model.generate(UpperCAmelCase__ , do_sample=UpperCAmelCase__ , seed=[7, 0])
A__ = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCAmelCase__)
A__ = (
'''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
)
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
A__ = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''')
A__ = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''')
A__ = '''left'''
# use different length sentences to test batching
A__ = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When''',
'''Hello, my dog is a little''',
]
A__ = tokenizer(UpperCAmelCase__ , return_tensors='''tf''' , padding=UpperCAmelCase__)
A__ = inputs['''input_ids''']
A__ = model.generate(input_ids=UpperCAmelCase__ , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12)
A__ = tokenizer(sentences[0] , return_tensors='''tf''').input_ids
A__ = model.generate(input_ids=UpperCAmelCase__ , max_new_tokens=12)
A__ = tokenizer(sentences[1] , return_tensors='''tf''').input_ids
A__ = model.generate(input_ids=UpperCAmelCase__ , max_new_tokens=12)
A__ = tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__)
A__ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCAmelCase__)
A__ = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCAmelCase__)
A__ = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
'''a single''',
'''Hello, my dog is a little bit of a shy one, but he is very friendly''',
]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__)
self.assertListEqual(UpperCAmelCase__ , [non_padded_sentence, padded_sentence])
| 177 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase : Tuple = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[int] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
__lowerCamelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
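
# Why the indirection above: `import transformers` should not pay for importing the
# torch/TF/vision code paths a user never touches, so the real submodules are only
# imported when one of their symbols is first accessed. A minimal, self-contained
# sketch of the same lazy idea (`_LazyLoader` is a hypothetical name, not the actual
# `_LazyModule` implementation):
import importlib
import types


class _LazyLoader(types.ModuleType):
    """Resolve an attribute to a symbol in a submodule, importing that submodule on first use."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure  # e.g. {"modeling_x": ["XModel"]}

    def __getattr__(self, attr: str):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")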
| 416 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>",
            "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
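
    # How the toy vocabulary above drives the expected result in `test_full_tokenizer`:
    # merges are applied greedily, one "token_a token_b <score>" rule per line, so
    # "lower" is tokenized roughly like this (a sketch of the merge steps, not the
    # tokenizer's exact internals):
    #
    #   l o w e r</w>    # split into characters, end-of-word marker on the last one
    #   lo w e r</w>     # apply merge "l o"
    #   low e r</w>      # apply merge "lo w"
    #   low er</w>       # apply merge "e r</w>"  ->  tokens ["low", "er</w>"]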
| 316 | 0 |
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the 1000-digit number ``N``."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
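

# A hedged alternative sketch: any 13-digit window that contains a '0' has product 0,
# so the digit string can be split on zeros and only the zero-free chunks scanned.
# It returns the same answer as `solution` above; `solution_windowed` is an
# illustrative name, not part of the original script.
def solution_windowed(n: str = N, width: int = 13) -> int:
    best = 0
    for chunk in n.replace("0", " ").split():
        for i in range(len(chunk) - width + 1):
            product = 1
            for digit in chunk[i : i + width]:
                product *= int(digit)
            best = max(best, product)
    return best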
if __name__ == "__main__":
    print(f"{solution() = }")
| 383 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
UpperCamelCase = False
UpperCamelCase = False
def A ( lowercase__ : Namespace ) -> Dict:
return TrainCommand(lowercase__ )
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
@staticmethod
def __a ( lowerCamelCase__ :ArgumentParser ):
UpperCamelCase__ :int = parser.add_parser("""train""" , help="""CLI tool to train a model on a task.""" )
train_parser.add_argument(
"""--train_data""" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="""path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.""" , )
train_parser.add_argument(
"""--column_label""" , type=lowerCamelCase__ , default=0 , help="""Column of the dataset csv file with example labels.""" )
train_parser.add_argument(
"""--column_text""" , type=lowerCamelCase__ , default=1 , help="""Column of the dataset csv file with example texts.""" )
train_parser.add_argument(
"""--column_id""" , type=lowerCamelCase__ , default=2 , help="""Column of the dataset csv file with example ids.""" )
train_parser.add_argument(
"""--skip_first_row""" , action="""store_true""" , help="""Skip the first row of the csv file (headers).""" )
train_parser.add_argument("""--validation_data""" , type=lowerCamelCase__ , default="""""" , help="""path to validation dataset.""" )
train_parser.add_argument(
"""--validation_split""" , type=lowerCamelCase__ , default=0.1 , help="""if validation dataset is not provided, fraction of train dataset to use as validation dataset.""" , )
train_parser.add_argument("""--output""" , type=lowerCamelCase__ , default="""./""" , help="""path to saved the trained model.""" )
train_parser.add_argument(
"""--task""" , type=lowerCamelCase__ , default="""text_classification""" , help="""Task to train the model on.""" )
train_parser.add_argument(
"""--model""" , type=lowerCamelCase__ , default="""bert-base-uncased""" , help="""Model's name or path to stored model.""" )
train_parser.add_argument("""--train_batch_size""" , type=lowerCamelCase__ , default=32 , help="""Batch size for training.""" )
train_parser.add_argument("""--valid_batch_size""" , type=lowerCamelCase__ , default=64 , help="""Batch size for validation.""" )
train_parser.add_argument("""--learning_rate""" , type=lowerCamelCase__ , default=3e-5 , help="""Learning rate.""" )
train_parser.add_argument("""--adam_epsilon""" , type=lowerCamelCase__ , default=1e-08 , help="""Epsilon for Adam optimizer.""" )
train_parser.set_defaults(func=lowerCamelCase__ )
def __init__( self :int , lowerCamelCase__ :Namespace ):
UpperCamelCase__ :List[Any] = logging.get_logger("""transformers-cli/training""" )
UpperCamelCase__ :Optional[Any] = """tf""" if is_tf_available() else """torch"""
os.makedirs(args.output , exist_ok=lowerCamelCase__ )
UpperCamelCase__ :int = args.output
UpperCamelCase__ :Optional[Any] = args.column_label
UpperCamelCase__ :Any = args.column_text
UpperCamelCase__ :Tuple = args.column_id
self.logger.info(f"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
UpperCamelCase__ :int = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"""Loading dataset from {args.train_data}""" )
UpperCamelCase__ :Any = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
UpperCamelCase__ :List[Any] = None
if args.validation_data:
self.logger.info(f"""Loading validation dataset from {args.validation_data}""" )
UpperCamelCase__ :List[str] = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
UpperCamelCase__ :Optional[int] = args.validation_split
UpperCamelCase__ :int = args.train_batch_size
UpperCamelCase__ :str = args.valid_batch_size
UpperCamelCase__ :Any = args.learning_rate
UpperCamelCase__ :List[str] = args.adam_epsilon
def __a ( self :Optional[int] ):
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __a ( self :List[Any] ):
raise NotImplementedError
def __a ( self :Dict ):
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
        self.pipeline.save_pretrained(self.output)
| 383 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( lowercase , unittest.TestCase ):
lowerCamelCase_ : Union[str, Any] = KandinskyVaaControlnetImgaImgPipeline
lowerCamelCase_ : Tuple = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
lowerCamelCase_ : Dict = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
lowerCamelCase_ : str = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
lowerCamelCase_ : Union[str, Any] = False
@property
def _snake_case ( self : Optional[int]):
return 32
@property
def _snake_case ( self : int):
return 32
@property
def _snake_case ( self : Tuple):
return self.time_input_dim
@property
def _snake_case ( self : Tuple):
return self.time_input_dim * 4
@property
def _snake_case ( self : Dict):
return 1_00
@property
def _snake_case ( self : List[Any]):
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ :Optional[Any] = {
"in_channels": 8,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
SCREAMING_SNAKE_CASE_ :List[str] = UNetaDConditionModel(**UpperCAmelCase)
return model
@property
def _snake_case ( self : Any):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _snake_case ( self : Tuple):
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ :Optional[int] = VQModel(**self.dummy_movq_kwargs)
return model
def _snake_case ( self : Dict):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.dummy_unet
SCREAMING_SNAKE_CASE_ :Dict = self.dummy_movq
SCREAMING_SNAKE_CASE_ :Optional[int] = {
"num_train_timesteps": 10_00,
"beta_schedule": "linear",
"beta_start": 0.00085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
SCREAMING_SNAKE_CASE_ :str = DDIMScheduler(**UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Union[str, Any] = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def _snake_case ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Any=0):
SCREAMING_SNAKE_CASE_ :Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCAmelCase)).to(UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
UpperCAmelCase)
# create init_image
SCREAMING_SNAKE_CASE_ :Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCAmelCase)).to(UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1)[0]
SCREAMING_SNAKE_CASE_ :List[Any] = Image.fromarray(np.uinta(UpperCAmelCase)).convert("RGB").resize((2_56, 2_56))
# create hint
SCREAMING_SNAKE_CASE_ :Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCAmelCase)).to(UpperCAmelCase)
if str(UpperCAmelCase).startswith("mps"):
SCREAMING_SNAKE_CASE_ :Any = torch.manual_seed(UpperCAmelCase)
else:
SCREAMING_SNAKE_CASE_ :Optional[int] = torch.Generator(device=UpperCAmelCase).manual_seed(UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Optional[int] = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def _snake_case ( self : List[Any]):
SCREAMING_SNAKE_CASE_ :int = "cpu"
SCREAMING_SNAKE_CASE_ :int = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ :Optional[int] = self.pipeline_class(**UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Tuple = pipe.to(UpperCAmelCase)
pipe.set_progress_bar_config(disable=UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Union[str, Any] = pipe(**self.get_dummy_inputs(UpperCAmelCase))
SCREAMING_SNAKE_CASE_ :Dict = output.images
SCREAMING_SNAKE_CASE_ :str = pipe(
**self.get_dummy_inputs(UpperCAmelCase) , return_dict=UpperCAmelCase , )[0]
SCREAMING_SNAKE_CASE_ :str = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ :Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_ :List[Any] = np.array(
[0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def _snake_case ( self : List[Any]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self : List[str]):
SCREAMING_SNAKE_CASE_ :Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy")
SCREAMING_SNAKE_CASE_ :Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
SCREAMING_SNAKE_CASE_ :Optional[Any] = init_image.resize((5_12, 5_12))
SCREAMING_SNAKE_CASE_ :List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png")
SCREAMING_SNAKE_CASE_ :List[str] = torch.from_numpy(np.array(UpperCAmelCase)).float() / 255.0
SCREAMING_SNAKE_CASE_ :List[str] = hint.permute(2 , 0 , 1).unsqueeze(0)
SCREAMING_SNAKE_CASE_ :Tuple = "A robot, 4k photo"
SCREAMING_SNAKE_CASE_ :Any = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa)
pipe_prior.to(UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Tuple = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa)
SCREAMING_SNAKE_CASE_ :Optional[int] = pipeline.to(UpperCAmelCase)
pipeline.set_progress_bar_config(disable=UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Optional[int] = torch.Generator(device="cpu").manual_seed(0)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Tuple = pipe_prior(
UpperCAmelCase , image=UpperCAmelCase , strength=0.85 , generator=UpperCAmelCase , negative_prompt="" , ).to_tuple()
SCREAMING_SNAKE_CASE_ :List[Any] = pipeline(
image=UpperCAmelCase , image_embeds=UpperCAmelCase , negative_image_embeds=UpperCAmelCase , hint=UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=1_00 , height=5_12 , width=5_12 , strength=0.5 , output_type="np" , )
SCREAMING_SNAKE_CASE_ :str = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase)
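
# A hedged sketch of the two-stage flow the slow test above exercises: the prior
# pipeline turns a prompt (plus an init image) into image embeddings, and the
# controlnet img2img decoder consumes those embeddings together with a depth hint
# (checkpoint names as in the test; every other argument is illustrative):
#
#   prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
#   decoder = KandinskyVaaControlnetImgaImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-controlnet-depth")
#   image_embeds, negative_embeds = prior(prompt, image=init_image, strength=0.85).to_tuple()
#   result = decoder(image=init_image, image_embeds=image_embeds, negative_image_embeds=negative_embeds,
#                    hint=hint, height=512, width=512).images[0]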
| 631 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["OwlViTFeatureExtractor"]
SCREAMING_SNAKE_CASE__ = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
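
# A hedged usage sketch for the OWL-ViT classes exported above (the checkpoint name
# is illustrative; post-processing details vary across library versions):
#
#   from transformers import OwlViTProcessor, OwlViTForObjectDetection
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
#   outputs = model(**inputs)  # logits + predicted boxes for each text query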
| 631 | 1 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    """Simulates a CUDA out-of-memory error."""
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)

        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
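

# A hedged sketch of how the decorator exercised above is applied to a real training
# loop: on a CUDA out-of-memory error it halves the batch size and re-invokes the
# wrapped function, which is why the implicit test expects [128, 64, 32, 16, 8]
# (the dataset/model names below are illustrative):
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)
#       for batch in loader:
#           ...  # forward / backward; an OOM here triggers a retry at half the size
#
#   train()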
| 447 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _SCREAMING_SNAKE_CASE (UpperCamelCase ):
lowerCAmelCase = ["""vqvae"""]
    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNetaDConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        # DDIM needs far fewer denoising steps than DDPM by default.
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
def __call__( self : Dict , UpperCamelCase : int = 1 , UpperCamelCase : str = None , UpperCamelCase : np.ndarray = None , UpperCamelCase : int = 0 , UpperCamelCase : int = 0 , UpperCamelCase : int = None , UpperCamelCase : torch.Generator = None , UpperCamelCase : float = 0 , UpperCamelCase : float = 0 , UpperCamelCase : torch.Generator = None , UpperCamelCase : float = 0 , UpperCamelCase : torch.Tensor = None , UpperCamelCase : torch.Tensor = None , UpperCamelCase : Any=True , )->Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
__SCREAMING_SNAKE_CASE : Optional[Any] = steps or self.get_default_steps()
self.scheduler.set_timesteps(UpperCamelCase )
__SCREAMING_SNAKE_CASE : Tuple = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
__SCREAMING_SNAKE_CASE : Dict = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
__SCREAMING_SNAKE_CASE : Any = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=UpperCamelCase , device=self.device , )
__SCREAMING_SNAKE_CASE : Any = noise
__SCREAMING_SNAKE_CASE : Any = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(UpperCamelCase , UpperCamelCase )
__SCREAMING_SNAKE_CASE : str = self.mel.audio_slice_to_image(UpperCamelCase )
__SCREAMING_SNAKE_CASE : Optional[Any] = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = (input_image / 2_5_5) * 2 - 1
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.vqvae.encode(torch.unsqueeze(UpperCamelCase , 0 ) ).latent_dist.sample(
generator=UpperCamelCase )[0]
__SCREAMING_SNAKE_CASE : int = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
__SCREAMING_SNAKE_CASE : List[str] = self.scheduler.add_noise(UpperCamelCase , UpperCamelCase , self.scheduler.timesteps[start_step - 1] )
__SCREAMING_SNAKE_CASE : int = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = int(mask_start_secs * pixels_per_second )
__SCREAMING_SNAKE_CASE : int = int(mask_end_secs * pixels_per_second )
__SCREAMING_SNAKE_CASE : Any = self.scheduler.add_noise(UpperCamelCase , UpperCamelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , UpperCamelCase ):
__SCREAMING_SNAKE_CASE : str = self.unet(UpperCamelCase , UpperCamelCase , UpperCamelCase )["sample"]
else:
__SCREAMING_SNAKE_CASE : int = self.unet(UpperCamelCase , UpperCamelCase )["sample"]
if isinstance(self.scheduler , UpperCamelCase ):
__SCREAMING_SNAKE_CASE : int = self.scheduler.step(
model_output=UpperCamelCase , timestep=UpperCamelCase , sample=UpperCamelCase , eta=UpperCamelCase , generator=UpperCamelCase , )["prev_sample"]
else:
__SCREAMING_SNAKE_CASE : Tuple = self.scheduler.step(
model_output=UpperCamelCase , timestep=UpperCamelCase , sample=UpperCamelCase , generator=UpperCamelCase , )["prev_sample"]
if mask is not None:
if mask_start > 0:
__SCREAMING_SNAKE_CASE : int = mask[:, step, :, :mask_start]
if mask_end > 0:
__SCREAMING_SNAKE_CASE : Optional[Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
__SCREAMING_SNAKE_CASE : Any = 1 / self.vqvae.config.scaling_factor * images
__SCREAMING_SNAKE_CASE : Any = self.vqvae.decode(UpperCamelCase )["sample"]
__SCREAMING_SNAKE_CASE : Union[str, Any] = (images / 2 + 0.5).clamp(0 , 1 )
__SCREAMING_SNAKE_CASE : str = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
__SCREAMING_SNAKE_CASE : Tuple = (images * 2_5_5).round().astype("uint8" )
__SCREAMING_SNAKE_CASE : Optional[int] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(UpperCamelCase , mode="RGB" ).convert("L" ) for _ in images) )
__SCREAMING_SNAKE_CASE : List[str] = [self.mel.image_to_audio(UpperCamelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(UpperCamelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(UpperCamelCase ) )
@torch.no_grad()
def __snake_case ( self : Dict , UpperCamelCase : List[Image.Image] , UpperCamelCase : int = 5_0 )->np.ndarray:
assert isinstance(self.scheduler , UpperCamelCase )
self.scheduler.set_timesteps(UpperCamelCase )
__SCREAMING_SNAKE_CASE : int = np.array(
[np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
__SCREAMING_SNAKE_CASE : Dict = (sample / 2_5_5) * 2 - 1
__SCREAMING_SNAKE_CASE : Optional[int] = torch.Tensor(UpperCamelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
__SCREAMING_SNAKE_CASE : Optional[int] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.alphas_cumprod[t]
__SCREAMING_SNAKE_CASE : List[Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
__SCREAMING_SNAKE_CASE : Dict = 1 - alpha_prod_t
__SCREAMING_SNAKE_CASE : List[Any] = self.unet(UpperCamelCase , UpperCamelCase )["sample"]
__SCREAMING_SNAKE_CASE : Union[str, Any] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
__SCREAMING_SNAKE_CASE : Union[str, Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
__SCREAMING_SNAKE_CASE : Optional[Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __snake_case ( UpperCamelCase : torch.Tensor , UpperCamelCase : torch.Tensor , UpperCamelCase : float )->torch.Tensor:
__SCREAMING_SNAKE_CASE : List[str] = acos(torch.dot(torch.flatten(UpperCamelCase ) , torch.flatten(UpperCamelCase ) ) / torch.norm(UpperCamelCase ) / torch.norm(UpperCamelCase ) )
return sin((1 - alpha) * theta ) * xa / sin(UpperCamelCase ) + sin(alpha * theta ) * xa / sin(UpperCamelCase )
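

# The static method above is spherical linear interpolation (slerp) between two latent
# tensors:  sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta),
# where theta is the angle between the flattened tensors. A standalone NumPy sketch of
# the same formula (an illustrative helper, not part of the pipeline API; it relies on
# the `numpy as np` import already at the top of this module):
def slerp_np(x0: np.ndarray, x1: np.ndarray, alpha: float) -> np.ndarray:
    theta = np.arccos(
        float(np.dot(x0.ravel(), x1.ravel())) / (np.linalg.norm(x0) * np.linalg.norm(x1))
    )
    return (
        np.sin((1 - alpha) * theta) * x0 / np.sin(theta)
        + np.sin(alpha * theta) * x1 / np.sin(theta)
    )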
| 447 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores
def _lowercase ( self ) -> List[str]:
_UpperCamelCase : Any = None
_UpperCamelCase : str = 20
_UpperCamelCase : Optional[int] = self._get_uniform_logits(batch_size=2 , length=lowercase__ )
# tweak scores to not be uniform anymore
_UpperCamelCase : str = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_UpperCamelCase : Optional[Any] = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_UpperCamelCase : Optional[int] = jax.nn.softmax(lowercase__ , axis=-1 )
_UpperCamelCase : List[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : Tuple = FlaxTemperatureLogitsWarper(temperature=1.3 )
_UpperCamelCase : Optional[int] = jax.nn.softmax(temp_dist_warper_sharper(lowercase__ , scores.copy() , cur_len=lowercase__ ) , axis=-1 )
_UpperCamelCase : Tuple = jax.nn.softmax(temp_dist_warper_smoother(lowercase__ , scores.copy() , cur_len=lowercase__ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _lowercase ( self ) -> Optional[Any]:
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : Optional[Any] = 10
_UpperCamelCase : str = 2
# create ramp distribution
_UpperCamelCase : Any = np.broadcast_to(np.arange(lowercase__ )[None, :] , (batch_size, vocab_size) ).copy()
_UpperCamelCase : Optional[Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
_UpperCamelCase : List[Any] = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : Union[str, Any] = top_k_warp(lowercase__ , lowercase__ , cur_len=lowercase__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_UpperCamelCase : Optional[int] = 5
_UpperCamelCase : List[Any] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
_UpperCamelCase : Tuple = np.broadcast_to(np.arange(lowercase__ )[None, :] , (batch_size, length) ).copy()
_UpperCamelCase : Optional[Any] = top_k_warp_safety_check(lowercase__ , lowercase__ , cur_len=lowercase__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _lowercase ( self ) -> Tuple:
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = 10
_UpperCamelCase : str = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_UpperCamelCase : str = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
_UpperCamelCase : List[Any] = FlaxTopPLogitsWarper(0.8 )
_UpperCamelCase : Union[str, Any] = np.exp(top_p_warp(lowercase__ , lowercase__ , cur_len=lowercase__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_UpperCamelCase : Optional[int] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(lowercase__ , lowercase__ , atol=1E-3 ) )
# check edge cases with negative and extreme logits
_UpperCamelCase : Any = np.broadcast_to(np.arange(lowercase__ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_UpperCamelCase : Tuple = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
_UpperCamelCase : Dict = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
_UpperCamelCase : int = top_p_warp(lowercase__ , lowercase__ , cur_len=lowercase__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _lowercase ( self ) -> int:
_UpperCamelCase : int = 20
_UpperCamelCase : str = 4
_UpperCamelCase : Dict = 0
_UpperCamelCase : List[str] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowercase__ )
# check that min length is applied at length 5
_UpperCamelCase : Tuple = ids_tensor((batch_size, 20) , vocab_size=20 )
_UpperCamelCase : Dict = 5
_UpperCamelCase : Union[str, Any] = self._get_uniform_logits(lowercase__ , lowercase__ )
_UpperCamelCase : Any = min_dist_processor(lowercase__ , lowercase__ , cur_len=lowercase__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
_UpperCamelCase : Optional[int] = self._get_uniform_logits(lowercase__ , lowercase__ )
_UpperCamelCase : Tuple = 15
_UpperCamelCase : str = min_dist_processor(lowercase__ , lowercase__ , cur_len=lowercase__ )
self.assertFalse(jnp.isinf(lowercase__ ).any() )
def _lowercase ( self ) -> Optional[int]:
_UpperCamelCase : Optional[int] = 20
_UpperCamelCase : List[str] = 4
_UpperCamelCase : Optional[int] = 0
_UpperCamelCase : List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowercase__ )
# check that all scores are -inf except the bos_token_id score
_UpperCamelCase : List[str] = ids_tensor((batch_size, 1) , vocab_size=20 )
_UpperCamelCase : Optional[int] = 1
_UpperCamelCase : Union[str, Any] = self._get_uniform_logits(lowercase__ , lowercase__ )
_UpperCamelCase : Any = logits_processor(lowercase__ , lowercase__ , cur_len=lowercase__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
_UpperCamelCase : Optional[Any] = 3
_UpperCamelCase : List[Any] = self._get_uniform_logits(lowercase__ , lowercase__ )
_UpperCamelCase : str = logits_processor(lowercase__ , lowercase__ , cur_len=lowercase__ )
self.assertFalse(jnp.isinf(lowercase__ ).any() )
def _lowercase ( self ) -> str:
_UpperCamelCase : Tuple = 20
_UpperCamelCase : List[str] = 4
_UpperCamelCase : Any = 0
_UpperCamelCase : Tuple = 5
_UpperCamelCase : Optional[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowercase__ , eos_token_id=lowercase__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
_UpperCamelCase : List[Any] = ids_tensor((batch_size, 4) , vocab_size=20 )
_UpperCamelCase : List[Any] = 4
_UpperCamelCase : Any = self._get_uniform_logits(lowercase__ , lowercase__ )
_UpperCamelCase : List[Any] = logits_processor(lowercase__ , lowercase__ , cur_len=lowercase__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_UpperCamelCase : Dict = 3
_UpperCamelCase : Optional[Any] = self._get_uniform_logits(lowercase__ , lowercase__ )
_UpperCamelCase : Tuple = logits_processor(lowercase__ , lowercase__ , cur_len=lowercase__ )
self.assertFalse(jnp.isinf(lowercase__ ).any() )
def _lowercase ( self ) -> Any:
_UpperCamelCase : List[str] = 4
_UpperCamelCase : Optional[Any] = 10
_UpperCamelCase : List[str] = 15
_UpperCamelCase : int = 2
_UpperCamelCase : str = 1
_UpperCamelCase : Dict = 15
# dummy input_ids and scores
_UpperCamelCase : Union[str, Any] = ids_tensor((batch_size, sequence_length) , lowercase__ )
_UpperCamelCase : List[str] = input_ids.copy()
_UpperCamelCase : Dict = self._get_uniform_logits(lowercase__ , lowercase__ )
_UpperCamelCase : List[Any] = scores.copy()
# instantiate all dist processors
_UpperCamelCase : str = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : Optional[Any] = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : Optional[Any] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCamelCase : Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowercase__ )
_UpperCamelCase : str = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowercase__ )
_UpperCamelCase : Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=lowercase__ , eos_token_id=lowercase__ )
_UpperCamelCase : List[Any] = 10
# no processor list
_UpperCamelCase : List[str] = temp_dist_warp(lowercase__ , lowercase__ , cur_len=lowercase__ )
_UpperCamelCase : List[str] = top_k_warp(lowercase__ , lowercase__ , cur_len=lowercase__ )
_UpperCamelCase : Tuple = top_p_warp(lowercase__ , lowercase__ , cur_len=lowercase__ )
_UpperCamelCase : Tuple = min_dist_proc(lowercase__ , lowercase__ , cur_len=lowercase__ )
_UpperCamelCase : Tuple = bos_dist_proc(lowercase__ , lowercase__ , cur_len=lowercase__ )
_UpperCamelCase : Tuple = eos_dist_proc(lowercase__ , lowercase__ , cur_len=lowercase__ )
# with processor list
_UpperCamelCase : List[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCamelCase : str = processor(lowercase__ , lowercase__ , cur_len=lowercase__ )
# scores should be equal
self.assertTrue(jnp.allclose(lowercase__ , lowercase__ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Optional[int] = 4
_UpperCamelCase : Dict = 10
_UpperCamelCase : int = 15
_UpperCamelCase : str = 2
_UpperCamelCase : List[str] = 1
_UpperCamelCase : Optional[Any] = 15
# dummy input_ids and scores
_UpperCamelCase : Optional[int] = ids_tensor((batch_size, sequence_length) , lowercase__ )
_UpperCamelCase : Tuple = input_ids.copy()
_UpperCamelCase : List[Any] = self._get_uniform_logits(lowercase__ , lowercase__ )
_UpperCamelCase : Union[str, Any] = scores.copy()
# instantiate all dist processors
_UpperCamelCase : Dict = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : Any = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : int = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCamelCase : Any = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowercase__ )
_UpperCamelCase : List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowercase__ )
_UpperCamelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=lowercase__ , eos_token_id=lowercase__ )
_UpperCamelCase : str = 10
# no processor list
def run_no_processor_list(_snake_case , _snake_case , _snake_case ):
_UpperCamelCase : List[Any] = temp_dist_warp(lowercase__ , lowercase__ , cur_len=lowercase__ )
_UpperCamelCase : List[Any] = top_k_warp(lowercase__ , lowercase__ , cur_len=lowercase__ )
_UpperCamelCase : str = top_p_warp(lowercase__ , lowercase__ , cur_len=lowercase__ )
_UpperCamelCase : Tuple = min_dist_proc(lowercase__ , lowercase__ , cur_len=lowercase__ )
_UpperCamelCase : Tuple = bos_dist_proc(lowercase__ , lowercase__ , cur_len=lowercase__ )
_UpperCamelCase : Dict = eos_dist_proc(lowercase__ , lowercase__ , cur_len=lowercase__ )
return scores
# with processor list
def run_processor_list(_snake_case , _snake_case , _snake_case ):
_UpperCamelCase : Optional[int] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCamelCase : Optional[Any] = processor(lowercase__ , lowercase__ , cur_len=lowercase__ )
return scores
_UpperCamelCase : str = jax.jit(lowercase__ )
_UpperCamelCase : Union[str, Any] = jax.jit(lowercase__ )
_UpperCamelCase : Any = jitted_run_no_processor_list(lowercase__ , lowercase__ , lowercase__ )
_UpperCamelCase : List[Any] = jitted_run_processor_list(lowercase__ , lowercase__ , lowercase__ )
# scores should be equal
self.assertTrue(jnp.allclose(lowercase__ , lowercase__ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
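
    # A hedged sketch of how these warpers/processors are composed at sampling time
    # outside of the tests (shapes and the rng handling are illustrative):
    #
    #   processors = FlaxLogitsProcessorList([
    #       FlaxTemperatureLogitsWarper(temperature=0.7),
    #       FlaxTopKLogitsWarper(top_k=50),
    #       FlaxTopPLogitsWarper(top_p=0.95),
    #   ])
    #   scores = processors(input_ids, scores, cur_len=cur_len)
    #   next_token = jax.random.categorical(rng, scores, axis=-1)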
| 683 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
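
# A hedged usage sketch for the pipelines re-exported above (checkpoint per the
# model card; prompts and images are illustrative):
#
#   from diffusers import VersatileDiffusionPipeline
#   pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion")
#   image = pipe.text_to_image("an astronaut riding a horse").images[0]
#   variant = pipe.image_variation(image).images[0]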
| 184 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase_ : Optional[int] = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Any = ['PerceiverFeatureExtractor']
lowerCAmelCase_ : Any = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Tuple = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
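
# A hedged usage sketch for the Perceiver classes exported above (checkpoint per the
# model card; Perceiver's text tokenizer operates directly on UTF-8 bytes):
#
#   from transformers import PerceiverTokenizer, PerceiverForMaskedLM
#   tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
#   model = PerceiverForMaskedLM.from_pretrained("deepmind/language-perceiver")
#   inputs = tokenizer("This is an incomplete sentence where some words are missing.", return_tensors="pt")
#   logits = model(**inputs).logits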
| 464 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase_ :
def __init__( self : Optional[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : Tuple=32 , lowerCAmelCase__ : Union[str, Any]=3 , lowerCAmelCase__ : Dict=10 , lowerCAmelCase__ : List[Any]=[10, 20, 30, 40] , lowerCAmelCase__ : List[Any]=[1, 1, 2, 1] , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : List[str]="relu" , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : Tuple=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : Optional[int] = batch_size
SCREAMING_SNAKE_CASE : List[Any] = image_size
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : str = embeddings_size
SCREAMING_SNAKE_CASE : str = hidden_sizes
SCREAMING_SNAKE_CASE : Tuple = depths
SCREAMING_SNAKE_CASE : Dict = is_training
SCREAMING_SNAKE_CASE : Optional[int] = use_labels
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : str = num_labels
SCREAMING_SNAKE_CASE : List[Any] = scope
SCREAMING_SNAKE_CASE : List[Any] = len(lowerCAmelCase__ )
def __lowercase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Any ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def __lowercase ( self : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = TFRegNetModel(config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : int = model(lowerCAmelCase__ , training=lowerCAmelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __lowercase ( self : int , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE : Any = TFRegNetForImageClassification(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Dict = model(lowerCAmelCase__ , labels=lowerCAmelCase__ , training=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( snake_case_ , snake_case_ , unittest.TestCase ):
_lowerCAmelCase : str = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
_lowerCAmelCase : str = (
{'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : List[Any] = False
_lowerCAmelCase : List[str] = False
_lowerCAmelCase : str = False
_lowerCAmelCase : str = False
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = TFRegNetModelTester(self )
SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def __lowercase ( self : List[str] ):
"""simple docstring"""
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def __lowercase ( self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def __lowercase ( self : int ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def __lowercase ( self : Dict ):
"""simple docstring"""
pass
def __lowercase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : int = model_class(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Tuple = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __lowercase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __lowercase ( self : Dict ):
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase__ : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple ):
SCREAMING_SNAKE_CASE : List[Any] = model_class(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Dict = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) , training=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE : int = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase__ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_type
SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[int] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : str={} ):
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCAmelCase__ , return_dict=lowerCAmelCase__ , **lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ , return_dict=lowerCAmelCase__ , **lowerCAmelCase__ ).to_tuple()
def recursive_check(lowerCAmelCase__ : Dict , lowerCAmelCase__ : str ):
if isinstance(lowerCAmelCase__ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
recursive_check(lowerCAmelCase__ , lowerCAmelCase__ )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(lowerCAmelCase__ , lowerCAmelCase__ ) ) , msg=(
'''Tuple and dict output are not equal. Difference:'''
F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"""
) , )
recursive_check(lowerCAmelCase__ , lowerCAmelCase__ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = model_class(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : int = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
check_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
check_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
check_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , {'''output_hidden_states''': True} )
SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
check_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , {'''output_hidden_states''': True} )
def __lowercase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def __lowercase ( self : Any ):
"""simple docstring"""
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[Any] = TFRegNetModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def UpperCAmelCase ( ):
SCREAMING_SNAKE_CASE : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
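# Note (added for clarity, not part of the original test file): the helper above loads the
# standard COCO fixture image (the familiar photo of two cats on a couch) that transformers
# vision integration tests reuse as a small, stable input.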
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def __lowercase ( self : List[str] ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowercase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE : int = prepare_img()
SCREAMING_SNAKE_CASE : List[str] = image_processor(images=lowerCAmelCase__ , return_tensors='''tf''' )
# forward pass
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**lowerCAmelCase__ , training=lowerCAmelCase__ )
# verify the logits
SCREAMING_SNAKE_CASE : List[str] = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : List[str] = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 )
| 464 | 1 |
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
a__ : Dict = 'src/transformers'
a__ : Tuple = 'docs/source/en'
a__ : str = '.'
def __snake_case ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ) -> str:
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase = f.readlines()
# Find the start prompt.
UpperCAmelCase = 0
while not lines[start_index].startswith(SCREAMING_SNAKE_CASE_ ):
start_index += 1
start_index += 1
UpperCAmelCase = start_index
while not lines[end_index].startswith(SCREAMING_SNAKE_CASE_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
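# Clarifying note (added, not in the original utility): the helper above returns the block of text
# found between `start_prompt` and `end_prompt` in the given file, together with its start/end line
# indices and the full list of lines, so the caller can splice a regenerated block back in place.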
# Add here suffixes that are used to identify models, separated by |
a__ : List[Any] = 'Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
a__ : Tuple = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
a__ : Optional[int] = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so it needs to be in an else branch after the two previous regexes.
a__ : Union[str, Any] = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# This is to make sure the transformers module imported is the one in the repo.
a__ : Optional[Any] = direct_transformers_import(TRANSFORMERS_PATH)
def __snake_case ( SCREAMING_SNAKE_CASE_ : Tuple ) -> str:
"""simple docstring"""
UpperCAmelCase = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , SCREAMING_SNAKE_CASE_ )
return [m.group(0 ) for m in matches]
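# Illustrative example (added): the camel-case splitter above turns "TFRegNetModel" into
# ["TF", "Reg", "Net", "Model"], which is how the lookup below strips trailing words until a
# known model prefix is found.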
def __snake_case ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple ) -> str:
"""simple docstring"""
UpperCAmelCase = 2 if text == '''✅''' or text == '''❌''' else len(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = (width - text_length) // 2
UpperCAmelCase = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
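# Illustrative example (added): centering the 2-character-wide check mark in a column of width 7
# gives left_indent = 2 and right_indent = 3, i.e. two spaces before the symbol and three after.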
def __snake_case ( ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
UpperCAmelCase = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
UpperCAmelCase = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
UpperCAmelCase = collections.defaultdict(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = collections.defaultdict(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = collections.defaultdict(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = collections.defaultdict(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = collections.defaultdict(SCREAMING_SNAKE_CASE_ )
# Let's lookup through all transformers object (once).
for attr_name in dir(SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase = None
if attr_name.endswith('''Tokenizer''' ):
UpperCAmelCase = slow_tokenizers
UpperCAmelCase = attr_name[:-9]
elif attr_name.endswith('''TokenizerFast''' ):
UpperCAmelCase = fast_tokenizers
UpperCAmelCase = attr_name[:-13]
elif _re_tf_models.match(SCREAMING_SNAKE_CASE_ ) is not None:
UpperCAmelCase = tf_models
UpperCAmelCase = _re_tf_models.match(SCREAMING_SNAKE_CASE_ ).groups()[0]
elif _re_flax_models.match(SCREAMING_SNAKE_CASE_ ) is not None:
UpperCAmelCase = flax_models
UpperCAmelCase = _re_flax_models.match(SCREAMING_SNAKE_CASE_ ).groups()[0]
elif _re_pt_models.match(SCREAMING_SNAKE_CASE_ ) is not None:
UpperCAmelCase = pt_models
UpperCAmelCase = _re_pt_models.match(SCREAMING_SNAKE_CASE_ ).groups()[0]
if lookup_dict is not None:
while len(SCREAMING_SNAKE_CASE_ ) > 0:
if attr_name in model_name_to_prefix.values():
UpperCAmelCase = True
break
# Try again after removing the last word in the name
UpperCAmelCase = ''''''.join(camel_case_split(SCREAMING_SNAKE_CASE_ )[:-1] )
# Let's build that table!
UpperCAmelCase = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
UpperCAmelCase = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support''']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
UpperCAmelCase = [len(SCREAMING_SNAKE_CASE_ ) + 2 for c in columns]
UpperCAmelCase = max([len(SCREAMING_SNAKE_CASE_ ) for name in model_names] ) + 2
# Build the table per se
UpperCAmelCase = '''|''' + '''|'''.join([_center_text(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for c, w in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] ) + '''|\n'''
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n"
UpperCAmelCase = {True: '''✅''', False: '''❌'''}
for name in model_names:
UpperCAmelCase = model_name_to_prefix[name]
UpperCAmelCase = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for l, w in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] ) + "|\n"
return table
def __snake_case ( SCREAMING_SNAKE_CASE_ : Dict=False ) -> Any:
"""simple docstring"""
UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = _find_text_in_file(
filename=os.path.join(SCREAMING_SNAKE_CASE_ , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , )
UpperCAmelCase = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(SCREAMING_SNAKE_CASE_ , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' )
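# Typical invocation (assumed from the header comment and the argparse definition below, not an
# official command): run `python utils/check_table.py` from the repository root to verify the
# table, or pass `--fix_and_overwrite` to regenerate it inside docs/source/en/index.md.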
if __name__ == "__main__":
a__ : Tuple = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
a__ : Optional[int] = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 51 |
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def __snake_case ( SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : int=None ) -> Any:
"""simple docstring"""
return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE_ )
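# Added note: the helper above wraps `dataclasses.field` with a `default_factory` so that a mutable
# default (a list here) is created per dataclass instance instead of being shared between instances.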
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
_lowerCamelCase =field(
metadata={"help": "The csv file to plot."} , )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={"help": "Disable logarithmic scale when plotting"} , )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={
"help": "Whether the csv file has training results or inference results. Defaults to inference results."
} , )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , )
_lowerCamelCase =list_field(
default=UpperCAmelCase_ , metadata={"help": "List of model names that are used instead of the ones in the csv file."} )
def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
try:
int(SCREAMING_SNAKE_CASE_ )
return True
except ValueError:
return False
def __snake_case ( SCREAMING_SNAKE_CASE_ : Any ) -> str:
"""simple docstring"""
try:
float(SCREAMING_SNAKE_CASE_ )
return True
except ValueError:
return False
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Dict , a__ : Optional[int] ):
UpperCAmelCase = args
UpperCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='''''' ) as csv_file:
UpperCAmelCase = csv.DictReader(a__ )
for row in reader:
UpperCAmelCase = row['''model''']
self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) )
self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) )
if can_convert_to_int(row['''result'''] ):
# value is not None
UpperCAmelCase = int(row['''result'''] )
elif can_convert_to_float(row['''result'''] ):
# value is not None
UpperCAmelCase = float(row['''result'''] )
def __snake_case ( self : Dict ):
UpperCAmelCase, UpperCAmelCase = plt.subplots()
UpperCAmelCase = '''Time usage''' if self.args.is_time else '''Memory usage'''
UpperCAmelCase = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference'''
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('''log''' )
ax.set_yscale('''log''' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
UpperCAmelCase = sorted(set(self.result_dict[model_name]['''bsz'''] ) )
UpperCAmelCase = sorted(set(self.result_dict[model_name]['''seq_len'''] ) )
UpperCAmelCase = self.result_dict[model_name]['''result''']
((UpperCAmelCase), (UpperCAmelCase)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
UpperCAmelCase = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
UpperCAmelCase = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=a__ , )
else:
UpperCAmelCase = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((UpperCAmelCase), (UpperCAmelCase)) = (
('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''')
)
UpperCAmelCase = np.asarray(a__ , a__ )[: len(a__ )]
plt.scatter(
a__ , a__ , label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}" )
plt.plot(a__ , a__ , '''--''' )
title_str += f" {label_model_name} vs."
UpperCAmelCase = title_str[:-4]
UpperCAmelCase = '''Time in s''' if self.args.is_time else '''Memory in MB'''
# plot
plt.title(a__ )
plt.xlabel(a__ )
plt.ylabel(a__ )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def __snake_case ( ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = HfArgumentParser(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = parser.parse_args_into_dataclasses()[0]
UpperCAmelCase = Plot(args=SCREAMING_SNAKE_CASE_ )
plot.plot()
if __name__ == "__main__":
main()
| 51 | 1 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = "https://openaipublic.azureedge.net/jukebox/models/"
_snake_case = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def A ( _lowerCamelCase ):
'''simple docstring'''
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Union[str, Any] = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : List[str] = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
_lowerCAmelCase : int = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : int = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
_lowerCAmelCase : Tuple = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
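# Worked example (added; the key names are illustrative, not taken from a real checkpoint): an
# entry ending in ".k" such as "vqvae.bottleneck.level_blocks.0.k" is renamed to end in ".codebook",
# and "prior.x_out.weight" becomes "prior.fc_proj_out.weight", matching the HF module names used below.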
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = {}
import re
_lowerCAmelCase : Union[str, Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : int = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(
r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : Optional[int] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Tuple = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : str = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_lowerCAmelCase : Optional[Any] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : int = prefix + resnet_block
_lowerCAmelCase : int = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_lowerCAmelCase : str = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Optional[int] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : str = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Union[str, Any] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_lowerCAmelCase : Optional[int] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : Dict = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_lowerCAmelCase : Any = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : List[str] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_lowerCAmelCase : Tuple = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : Optional[Any] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_lowerCAmelCase : List[str] = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Tuple = replace_key(_lowerCamelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
        # handle mismatched shape
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
_lowerCAmelCase : Any = model_state_dict[F"{key_prefix}.{key}"]
            print(F"{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match" )
_lowerCAmelCase : Tuple = original_key
_lowerCAmelCase : List[Any] = original_key
_lowerCAmelCase : Optional[int] = value
return new_dict
@torch.no_grad()
def A ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_lowerCAmelCase : List[Any] = requests.get(F"{PREFIX}{file}" , allow_redirects=_lowerCamelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_lowerCamelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content )
_lowerCAmelCase : Optional[Any] = MODEL_MAPPING[model_name.split("/" )[-1]]
_lowerCAmelCase : Tuple = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : List[Any] = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Any = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["model"]
_lowerCAmelCase : Union[str, Any] = {}
for k in old_dic.keys():
if k.endswith(".b" ):
_lowerCAmelCase : Dict = old_dic[k]
elif k.endswith(".w" ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Union[str, Any] = old_dic[k]
_lowerCAmelCase : Union[str, Any] = "vqvae" if i == 0 else F"priors.{3 - i}"
_lowerCAmelCase : Union[str, Any] = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
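# Example command line (added for illustration; the script name is assumed, the flags mirror the
# argparse definition below):
#   python convert_jukebox.py --model_name jukebox-1b-lyrics --pytorch_dump_folder_path jukebox-1b-lyrics-converted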
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
_snake_case = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 715 |
def price_plus_tax(price, tax_rate):
    '''simple docstring'''
    return price * (1 + tax_rate)
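# Added sanity note: with the first call in the demo below, price_plus_tax(100, 0.25) returns 125.0.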
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 658 | 0 |
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__magic_name__ : str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class lowercase__ ( datasets.BuilderConfig ):
"""simple docstring"""
__lowerCAmelCase : Optional[datasets.Features] = None
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ):
import pyspark
def generate_fn():
UpperCamelCase : List[str] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
UpperCamelCase : List[str] = df_with_partition_id.select("""*""" ).where(f"""part_id = {partition_id}""" ).drop("""part_id""" )
UpperCamelCase : Optional[Any] = partition_df.collect()
UpperCamelCase : Union[str, Any] = 0
for row in rows:
yield f"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
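# Added note: the factory above yields (example_id, example) pairs whose id is
# "<partition_id>_<row_id>", walking the DataFrame partitions in the order given by
# `partition_order`; the iterable class below wraps it and reshuffles by permuting that order.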
class lowercase__ ( _BaseExamplesIterable ):
"""simple docstring"""
def __init__( self , _A , _A=None , ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = df
UpperCamelCase : Tuple = partition_order or range(self.df.rdd.getNumPartitions() )
UpperCamelCase : Union[str, Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self ):
'''simple docstring'''
yield from self.generate_examples_fn()
def _a ( self , _A ):
'''simple docstring'''
UpperCamelCase : int = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(_A )
return SparkExamplesIterable(self.df , partition_order=_A )
def _a ( self , _A , _A ):
'''simple docstring'''
UpperCamelCase : Optional[int] = self.split_shard_indices_by_worker(_A , _A )
return SparkExamplesIterable(self.df , partition_order=_A )
@property
def _a ( self ):
'''simple docstring'''
return len(self.partition_order )
class lowercase__ ( datasets.DatasetBuilder ):
"""simple docstring"""
__lowerCAmelCase : Dict = SparkConfig
def __init__( self , _A , _A = None , _A = None , **_A , ):
'''simple docstring'''
import pyspark
UpperCamelCase : Optional[int] = pyspark.sql.SparkSession.builder.getOrCreate()
UpperCamelCase : List[str] = df
UpperCamelCase : Optional[Any] = working_dir
super().__init__(
cache_dir=_A , config_name=str(self.df.semanticHash() ) , **_A , )
def _a ( self ):
'''simple docstring'''
def create_cache_and_write_probe(_A ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=_A )
            UpperCamelCase : List[Any] = os.path.join(self._cache_dir , """fs_test""" + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(_A , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
UpperCamelCase : Tuple = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(_A ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def _a ( self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def _a ( self , _A ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _a ( self , _A ):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(_A ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
UpperCamelCase : Tuple = self.df.count()
UpperCamelCase : Union[str, Any] = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
UpperCamelCase : List[Any] = (
self.df.limit(_A )
.repartition(1 )
.mapInArrow(_A , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
UpperCamelCase : str = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
UpperCamelCase : int = min(_A , int(approx_total_size / max_shard_size ) )
UpperCamelCase : Optional[Any] = self.df.repartition(_A )
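    # Added explanation: the method above estimates the Arrow size of one row from a sample of at
    # most 100 rows, scales that up to the full row count, and repartitions the DataFrame so that
    # each partition's output should stay below `max_shard_size`.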
def _a ( self , _A , _A , _A , ):
'''simple docstring'''
import pyspark
UpperCamelCase : List[Any] = ParquetWriter if file_format == """parquet""" else ArrowWriter
UpperCamelCase : Dict = os.path.join(self._working_dir , os.path.basename(_A ) ) if self._working_dir else fpath
UpperCamelCase : List[Any] = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
UpperCamelCase : Optional[int] = self.config.features
UpperCamelCase : Optional[Any] = self._writer_batch_size
UpperCamelCase : Union[str, Any] = self._fs.storage_options
def write_arrow(_A ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
UpperCamelCase : Optional[int] = pyspark.TaskContext().taskAttemptId()
UpperCamelCase : str = next(_A , _A )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
UpperCamelCase : str = 0
UpperCamelCase : Union[str, Any] = writer_class(
features=_A , path=working_fpath.replace("""SSSSS""" , f"""{shard_id:05d}""" ).replace("""TTTTT""" , f"""{task_id:05d}""" ) , writer_batch_size=_A , storage_options=_A , embed_local_files=_A , )
UpperCamelCase : List[str] = pa.Table.from_batches([first_batch] )
writer.write_table(_A )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
UpperCamelCase , UpperCamelCase : Any = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
UpperCamelCase : Any = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , f"""{shard_id:05d}""" ).replace("""TTTTT""" , f"""{task_id:05d}""" ) , writer_batch_size=_A , storage_options=_A , embed_local_files=_A , )
UpperCamelCase : Dict = pa.Table.from_batches([batch] )
writer.write_table(_A )
if writer._num_bytes > 0:
UpperCamelCase , UpperCamelCase : Any = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(_A ) ):
UpperCamelCase : Optional[int] = os.path.join(os.path.dirname(_A ) , os.path.basename(_A ) )
shutil.move(_A , _A )
UpperCamelCase : int = (
self.df.mapInArrow(_A , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _a ( self , _A , _A = "arrow" , _A = None , _A = None , **_A , ):
'''simple docstring'''
self._validate_cache_dir()
UpperCamelCase : Dict = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(_A )
UpperCamelCase : Optional[int] = not is_remote_filesystem(self._fs )
UpperCamelCase : Dict = os.path.join if is_local else posixpath.join
UpperCamelCase : Optional[int] = """-TTTTT-SSSSS-of-NNNNN"""
UpperCamelCase : Dict = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
UpperCamelCase : Any = path_join(self._output_dir , _A )
UpperCamelCase : Any = 0
UpperCamelCase : List[str] = 0
UpperCamelCase : str = 0
UpperCamelCase : Union[str, Any] = []
UpperCamelCase : str = []
for task_id, content in self._prepare_split_single(_A , _A , _A ):
(
(
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) ,
) : Dict = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(_A )
UpperCamelCase : str = total_num_examples
UpperCamelCase : Tuple = total_num_bytes
# should rename everything at the end
logger.debug(f"""Renaming {total_shards} shards.""" )
if total_shards > 1:
UpperCamelCase : List[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
UpperCamelCase : List[Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
_A , _A , _A , ):
rename(
_A , fpath.replace("""SSSSS""" , f"""{shard_id:05d}""" ).replace("""TTTTT""" , f"""{task_id:05d}""" ) , fpath.replace("""TTTTT-SSSSS""" , f"""{global_shard_id:05d}""" ).replace("""NNNNN""" , f"""{total_shards:05d}""" ) , )
UpperCamelCase : str = []
UpperCamelCase : List[Any] = 0
for i in range(len(_A ) ):
UpperCamelCase , UpperCamelCase : Dict = task_id_and_num_shards[i]
for shard_id in range(_A ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(_A , len(_A ) ).map(lambda _A : _rename_shard(*_A ) ).collect()
else:
# don't use any pattern
UpperCamelCase : int = 0
UpperCamelCase : Any = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , f"""{shard_id:05d}""" ).replace("""TTTTT""" , f"""{task_id:05d}""" ) , fpath.replace(_A , """""" ) , )
def _a ( self , _A , ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
| 102 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__magic_name__ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=_A , speech_processor=_A , vae=_A , text_encoder=_A , tokenizer=_A , unet=_A , scheduler=_A , feature_extractor=_A , )
def _a ( self , _A = "auto" ):
'''simple docstring'''
if slice_size == "auto":
UpperCamelCase : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_A )
def _a ( self ):
'''simple docstring'''
self.enable_attention_slicing(_A )
@torch.no_grad()
def __call__( self , _A , _A=1_6_0_0_0 , _A = 5_1_2 , _A = 5_1_2 , _A = 5_0 , _A = 7.5 , _A = None , _A = 1 , _A = 0.0 , _A = None , _A = None , _A = "pil" , _A = True , _A = None , _A = 1 , **_A , ):
'''simple docstring'''
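        # Added overview (not in the original pipeline): the audio is first transcribed with the
        # Whisper speech model, the transcription becomes the text prompt, and the rest of this
        # method follows the standard Stable Diffusion loop: CLIP text encoding, optional
        # classifier-free guidance, UNet denoising, then VAE decoding to an image.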
UpperCamelCase : str = self.speech_processor.feature_extractor(
_A , return_tensors="""pt""" , sampling_rate=_A ).input_features.to(self.device )
UpperCamelCase : List[Any] = self.speech_model.generate(_A , max_length=4_8_0_0_0_0 )
UpperCamelCase : Optional[int] = self.speech_processor.tokenizer.batch_decode(_A , skip_special_tokens=_A , normalize=_A )[
0
]
if isinstance(_A , _A ):
UpperCamelCase : Tuple = 1
elif isinstance(_A , _A ):
UpperCamelCase : List[Any] = len(_A )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(_A )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_A , _A ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(_A )}.""" )
# get prompt text embeddings
UpperCamelCase : Dict = self.tokenizer(
_A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
UpperCamelCase : Dict = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
UpperCamelCase : int = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase : List[str] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = text_embeddings.shape
UpperCamelCase : Optional[int] = text_embeddings.repeat(1 , _A , 1 )
UpperCamelCase : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , _A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase : Optional[Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase : List[str]
if negative_prompt is None:
UpperCamelCase : str = [""""""] * batch_size
elif type(_A ) is not type(_A ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(_A )} !="""
f""" {type(_A )}.""" )
elif isinstance(_A , _A ):
UpperCamelCase : Tuple = [negative_prompt]
elif batch_size != len(_A ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(_A )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
UpperCamelCase : Any = negative_prompt
UpperCamelCase : Optional[int] = text_input_ids.shape[-1]
UpperCamelCase : List[str] = self.tokenizer(
_A , padding="""max_length""" , max_length=_A , truncation=_A , return_tensors="""pt""" , )
UpperCamelCase : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase : List[Any] = uncond_embeddings.shape[1]
UpperCamelCase : Dict = uncond_embeddings.repeat(1 , _A , 1 )
UpperCamelCase : Dict = uncond_embeddings.view(batch_size * num_images_per_prompt , _A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase : str = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase : Any = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCamelCase : Tuple = torch.randn(_A , generator=_A , device="""cpu""" , dtype=_A ).to(
self.device )
else:
UpperCamelCase : Any = torch.randn(_A , generator=_A , device=self.device , dtype=_A )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
UpperCamelCase : Optional[Any] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCamelCase : Tuple = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase : List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase : str = {}
if accepts_eta:
UpperCamelCase : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase : str = self.scheduler.scale_model_input(_A , _A )
# predict the noise residual
UpperCamelCase : Optional[Any] = self.unet(_A , _A , encoder_hidden_states=_A ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase : str = noise_pred.chunk(2 )
UpperCamelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
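                # Added note: this is the usual classifier-free guidance combination, i.e. the
                # unconditional prediction plus `guidance_scale` times the difference between the
                # text-conditioned and unconditional predictions; a scale of 1 would reduce to the
                # text-conditioned prediction alone.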
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase : Any = self.scheduler.step(_A , _A , _A , **_A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_A , _A , _A )
UpperCamelCase : Optional[Any] = 1 / 0.1_82_15 * latents
UpperCamelCase : Union[str, Any] = self.vae.decode(_A ).sample
UpperCamelCase : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase : Optional[Any] = self.numpy_to_pil(_A )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=_A , nsfw_content_detected=_A )
| 102 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 306 |
def solution(_a = 2000000) -> int:
    primality_list = [0 for i in range(_a + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(_a**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, _a + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(_a):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
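# Added check (illustrative): calling solution(10) sums the primes below ten, 2 + 3 + 5 + 7 = 17;
# the default limit of 2000000 corresponds to Project Euler problem 10.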
if __name__ == "__main__":
print(F"""{solution() = }""") | 306 | 1 |
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
) | 623 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ : int =logging.get_logger(__name__)
__magic_name__ : List[Any] ={}
class UpperCamelCase_ ( A ):
"""simple docstring"""
UpperCAmelCase__ : int = '''llama'''
UpperCAmelCase__ : Any = ['''past_key_values''']
def __init__( self : List[Any] , _lowerCamelCase : List[Any]=3_20_00 , _lowerCamelCase : Optional[Any]=40_96 , _lowerCamelCase : Tuple=1_10_08 , _lowerCamelCase : List[Any]=32 , _lowerCamelCase : Tuple=32 , _lowerCamelCase : List[str]=None , _lowerCamelCase : str="silu" , _lowerCamelCase : Optional[Any]=20_48 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : Union[str, Any]=1e-6 , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Dict=0 , _lowerCamelCase : int=1 , _lowerCamelCase : str=2 , _lowerCamelCase : List[Any]=1 , _lowerCamelCase : Optional[int]=False , _lowerCamelCase : List[str]=None , **_lowerCamelCase : List[Any] , ) -> Any:
__magic_name__ = vocab_size
__magic_name__ = max_position_embeddings
__magic_name__ = hidden_size
__magic_name__ = intermediate_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__magic_name__ = num_attention_heads
__magic_name__ = num_key_value_heads
__magic_name__ = hidden_act
__magic_name__ = initializer_range
__magic_name__ = rms_norm_eps
__magic_name__ = pretraining_tp
__magic_name__ = use_cache
__magic_name__ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase , )
def __A ( self : Union[str, Any] ) -> List[Any]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _lowerCamelCase ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f'got {self.rope_scaling}' )
__magic_name__ = self.rope_scaling.get("type" , _lowerCamelCase )
__magic_name__ = self.rope_scaling.get("factor" , _lowerCamelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(_lowerCamelCase , _lowerCamelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
| 664 | 0 |
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , __a , )
class _lowercase ( __a ):
_UpperCAmelCase = RobertaConfig
_UpperCAmelCase = '''roberta'''
def __init__( self , A__ ) -> Any:
super().__init__(A__ )
snake_case = RobertaEmbeddings(A__ )
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''' , __a , )
class _lowercase ( __a ):
_UpperCAmelCase = RobertaConfig
_UpperCAmelCase = '''roberta'''
def __init__( self , A__ ) -> str:
super().__init__(A__ )
snake_case = config.num_labels
snake_case = config.num_hidden_layers
snake_case = DeeRobertaModel(A__ )
snake_case = nn.Dropout(config.hidden_dropout_prob )
snake_case = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(A__ )
def UpperCamelCase ( self , A__=None , A__=None , A__=None , A__=None , A__=None , A__=None , A__=None , A__=-1 , A__=False , ) -> Union[str, Any]:
snake_case = self.num_layers
try:
snake_case = self.roberta(
A__ , attention_mask=A__ , token_type_ids=A__ , position_ids=A__ , head_mask=A__ , inputs_embeds=A__ , )
snake_case = outputs[1]
snake_case = self.dropout(A__ )
snake_case = self.classifier(A__ )
snake_case = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case = e.message
snake_case = e.exit_layer
snake_case = outputs[0]
if not self.training:
snake_case = entropy(A__ )
snake_case = []
snake_case = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case = MSELoss()
snake_case = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case = CrossEntropyLoss()
snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case = []
for highway_exit in outputs[-1]:
snake_case = highway_exit[0]
if not self.training:
highway_logits_all.append(A__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case = MSELoss()
snake_case = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case = CrossEntropyLoss()
snake_case = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(A__ )
if train_highway:
snake_case = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case = (loss,) + outputs
if not self.training:
snake_case = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 44 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _lowercase :
@staticmethod
def UpperCamelCase ( *A__ , **A__ ) -> List[Any]:
pass
def __UpperCamelCase ( a : Image ) -> str:
    snake_case = hashlib.md5(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _lowercase ( unittest.TestCase ):
_UpperCAmelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]:
snake_case = DepthEstimationPipeline(model=A__ , image_processor=A__ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCamelCase ( self , A__ , A__ ) -> List[Any]:
snake_case = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , A__ )
import datasets
snake_case = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
snake_case = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] , A__ , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def UpperCamelCase ( self ) -> Optional[Any]:
pass
@slow
@require_torch
def UpperCamelCase ( self ) -> Dict:
snake_case = '''Intel/dpt-large'''
snake_case = pipeline('''depth-estimation''' , model=A__ )
snake_case = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
snake_case = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 2_9.3_0_4 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.6_6_2 )
@require_torch
def UpperCamelCase ( self ) -> Any:
# This is highly irregular to have no small tests.
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 44 | 1 |
def lowerCAmelCase_ ( __a ) -> None:
"""simple docstring"""
lowerCamelCase__: List[Any] =generate_pascal_triangle(__a )
for row_idx in range(__a ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=" " )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=" " )
else:
print(triangle[row_idx][col_idx] , end="" )
print()
def lowerCAmelCase_ ( __a ) -> list[list[int]]:
"""simple docstring"""
if not isinstance(__a , __a ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
lowerCamelCase__: list[list[int]] =[]
for current_row_idx in range(__a ):
lowerCamelCase__: Any =populate_current_row(__a , __a )
triangle.append(__a )
return triangle
def lowerCAmelCase_ ( __a , __a ) -> list[int]:
"""simple docstring"""
lowerCamelCase__: Dict =[-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
lowerCamelCase__ , lowerCamelCase__: Dict =1, 1
for current_col_idx in range(1 , __a ):
calculate_current_element(
__a , __a , __a , __a )
return current_row
def lowerCAmelCase_ ( __a , __a , __a , __a , ) -> None:
"""simple docstring"""
lowerCamelCase__: Dict =triangle[current_row_idx - 1][current_col_idx - 1]
lowerCamelCase__: List[Any] =triangle[current_row_idx - 1][current_col_idx]
lowerCamelCase__: int =above_to_left_elt + above_to_right_elt
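# Worked example of the recurrence implemented above (standard Pascal values):
# given triangle[2] == [1, 2, 1], populating row 3 turns [1, -1, -1, 1] into
# [1, 1 + 2, 2 + 1, 1] == [1, 3, 3, 1].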
def lowerCAmelCase_ ( __a ) -> list[list[int]]:
"""simple docstring"""
if not isinstance(__a , __a ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
lowerCamelCase__: list[list[int]] =[[1]]
for row_index in range(1 , __a ):
lowerCamelCase__: str =[0] + result[-1] + [0]
lowerCamelCase__: List[str] =row_index + 1
# Calculate the number of distinct elements in a row
lowerCamelCase__: str =sum(divmod(__a , 2 ) )
lowerCamelCase__: Union[str, Any] =[
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
lowerCamelCase__: Optional[int] =row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
lowerCamelCase__: Optional[int] =row_first_half + row_second_half
result.append(__a )
return result
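# Illustration of the symmetry trick above: for row_index == 4 the padded previous
# row is [0, 1, 3, 3, 1, 0], only the distinct first half [1, 4, 6] is computed,
# then [4, 1] is mirrored back to give [1, 4, 6, 4, 1].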
def lowerCAmelCase_ ( ) -> None:
"""simple docstring"""
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__a , __a ) -> None:
lowerCamelCase__: Union[str, Any] =F"""{func.__name__}({value})"""
lowerCamelCase__: Any =timeit(F"""__main__.{call}""" , setup="import __main__" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F"""{call:38} -- {timing:.4f} seconds""" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(__a , __a )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 59 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : Any = ["""image_processor""", """tokenizer"""]
SCREAMING_SNAKE_CASE_ : Dict = """BlipImageProcessor"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = """AutoTokenizer"""
def __init__( self : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any)-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = False
super().__init__(UpperCamelCase__ , UpperCamelCase__)
__lowerCAmelCase: str = self.image_processor
def __call__( self : Dict , UpperCamelCase__ : ImageInput = None , UpperCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : Any , )-> BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text.")
# Get only text
if images is None:
__lowerCAmelCase: Optional[int] = self.tokenizer
__lowerCAmelCase: List[str] = self.tokenizer(
text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
return text_encoding
# add pixel_values
__lowerCAmelCase: Tuple = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__)
if text is not None:
__lowerCAmelCase: Tuple = self.tokenizer(
text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
else:
__lowerCAmelCase: str = None
if text_encoding is not None:
encoding_image_processor.update(UpperCamelCase__)
return encoding_image_processor
def lowercase_ ( self : List[str] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Any)-> int:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__)
def lowercase_ ( self : Union[str, Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[Any])-> Optional[Any]:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__)
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowercase_ ( self : Union[str, Any])-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = self.tokenizer.model_input_names
__lowerCAmelCase: Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
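# Usage sketch: processor(images=image, text="a photo of", return_tensors="pt")
# returns a BatchEncoding whose `pixel_values` come from the image processor, merged
# with the tokenizer outputs (input_ids, attention_mask); with text=None only the
# image features are returned, and with images=None only the text encoding.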
| 346 | 0 |
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def _lowerCamelCase ( UpperCAmelCase_ : Optional[Any], UpperCAmelCase_ : Any, UpperCAmelCase_ : List[Any], UpperCAmelCase_ : List[str] ) -> Tuple:
"""simple docstring"""
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
A__ = TOKENIZER_CLASSES
else:
A__ = {tokenizer_name: getattr(UpperCAmelCase_, tokenizer_name + "Fast" )}
logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
A__ = TOKENIZER_CLASSES[tokenizer_name]
A__ = True
if checkpoint_name is None:
A__ = list(tokenizer_class.max_model_input_sizes.keys() )
else:
A__ = [checkpoint_name]
logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
A__ = tokenizer_class.from_pretrained(UpperCAmelCase_, force_download=UpperCAmelCase_ )
# Save fast tokenizer
logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
A__ , A__ = checkpoint.split("/" )
A__ = os.path.join(UpperCAmelCase_, UpperCAmelCase_ )
elif add_prefix:
A__ = checkpoint
A__ = dump_path
else:
A__ = None
A__ = dump_path
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
A__ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
A__ = file_path.split(UpperCAmelCase_ )[-1][0]
if next_char == "/":
A__ = os.path.join(UpperCAmelCase_, UpperCAmelCase_ )
A__ = None
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
A__ = tokenizer.save_pretrained(
UpperCAmelCase_, legacy_format=UpperCAmelCase_, filename_prefix=UpperCAmelCase_ )
logger.info(F"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith("tokenizer.json" ):
os.remove(UpperCAmelCase_ )
logger.info(F"""=> removing {file_name}""" )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
f'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
UpperCamelCase = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 702 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
A__ : str = (DEISMultistepScheduler,)
A__ : List[str] = (("num_inference_steps", 2_5),)
def snake_case__ ( self , **SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
A__ = {
"num_train_timesteps": 1000,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"solver_order": 2,
}
config.update(**SCREAMING_SNAKE_CASE__ )
return config
def snake_case__ ( self , SCREAMING_SNAKE_CASE__=0 , **SCREAMING_SNAKE_CASE__ ) -> Tuple:
A__ = dict(self.forward_default_kwargs )
A__ = kwargs.pop("num_inference_steps" , SCREAMING_SNAKE_CASE__ )
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
A__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
A__ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE__ )
A__ = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
A__ = dummy_past_residuals[: new_scheduler.config.solver_order]
A__ , A__ = sample, sample
for t in range(SCREAMING_SNAKE_CASE__ , time_step + scheduler.config.solver_order + 1 ):
A__ = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
A__ = new_scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
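        # The loop above is the save/reload determinism check: stepping the original
        # scheduler and one rebuilt via save_config()/from_pretrained() over the same
        # dummy residuals must give numerically identical prev_sample tensors.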
def snake_case__ ( self ) -> List[Any]:
pass
def snake_case__ ( self , SCREAMING_SNAKE_CASE__=0 , **SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
A__ = dict(self.forward_default_kwargs )
A__ = kwargs.pop("num_inference_steps" , SCREAMING_SNAKE_CASE__ )
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config()
A__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals (must be after setting timesteps)
A__ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE__ )
A__ = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residual (must be after setting timesteps)
A__ = dummy_past_residuals[: new_scheduler.config.solver_order]
A__ = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
A__ = new_scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case__ ( self , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ ) -> str:
if scheduler is None:
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
A__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
A__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
A__ = 10
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(scheduler.timesteps ):
A__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
return sample
def snake_case__ ( self ) -> Tuple:
A__ = dict(self.forward_default_kwargs )
A__ = kwargs.pop("num_inference_steps" , SCREAMING_SNAKE_CASE__ )
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config()
A__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
A__ = self.dummy_sample
A__ = 0.1 * sample
if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE__ , "set_timesteps" ):
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE__ , "set_timesteps" ):
A__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A__ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
A__ = dummy_past_residuals[: scheduler.config.solver_order]
A__ = scheduler.timesteps[5]
A__ = scheduler.timesteps[6]
A__ = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
A__ = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case__ ( self ) -> List[str]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
A__ = DEISMultistepScheduler(**self.get_scheduler_config() )
A__ = self.full_loop(scheduler=SCREAMING_SNAKE_CASE__ )
A__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
A__ = DPMSolverSinglestepScheduler.from_config(scheduler.config )
A__ = DPMSolverMultistepScheduler.from_config(scheduler.config )
A__ = UniPCMultistepScheduler.from_config(scheduler.config )
A__ = DEISMultistepScheduler.from_config(scheduler.config )
A__ = self.full_loop(scheduler=SCREAMING_SNAKE_CASE__ )
A__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def snake_case__ ( self ) -> List[str]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self ) -> Optional[int]:
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE__ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , sample_max_value=SCREAMING_SNAKE_CASE__ , algorithm_type="deis" , solver_order=SCREAMING_SNAKE_CASE__ , solver_type=SCREAMING_SNAKE_CASE__ , )
def snake_case__ ( self ) -> List[str]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self ) -> Dict:
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=SCREAMING_SNAKE_CASE__ , solver_type=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , algorithm_type=SCREAMING_SNAKE_CASE__ , )
A__ = self.full_loop(
solver_order=SCREAMING_SNAKE_CASE__ , solver_type=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , algorithm_type=SCREAMING_SNAKE_CASE__ , )
assert not torch.isnan(SCREAMING_SNAKE_CASE__ ).any(), "Samples have nan numbers"
def snake_case__ ( self ) -> Optional[int]:
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE__ )
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self ) -> Optional[Any]:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE__ , time_step=0 )
def snake_case__ ( self ) -> int:
A__ = self.full_loop()
A__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def snake_case__ ( self ) -> Union[str, Any]:
A__ = self.full_loop(prediction_type="v_prediction" )
A__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3
def snake_case__ ( self ) -> List[str]:
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE__ , dynamic_thresholding_ratio=0 )
A__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
A__ = 10
A__ = self.dummy_model()
A__ = self.dummy_sample_deter.half()
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(scheduler.timesteps ):
A__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
        assert sample.dtype == torch.float16
| 562 | 0 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _UpperCamelCase ( unittest.TestCase , __snake_case ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Dict:
A = load_tool("""text-classification""" )
self.tool.setup()
A = load_tool("""text-classification""" , remote=a__ )
def _UpperCAmelCase ( self ) -> Any:
A = self.tool("""That's quite cool""" , ["""positive""", """negative"""] )
self.assertEqual(a__ , """positive""" )
def _UpperCAmelCase ( self ) -> Tuple:
A = self.remote_tool("""That's quite cool""" , ["""positive""", """negative"""] )
self.assertEqual(a__ , """positive""" )
def _UpperCAmelCase ( self ) -> Any:
A = self.tool(text="""That's quite cool""" , labels=["""positive""", """negative"""] )
self.assertEqual(a__ , """positive""" )
def _UpperCAmelCase ( self ) -> Optional[Any]:
A = self.remote_tool(text="""That's quite cool""" , labels=["""positive""", """negative"""] )
self.assertEqual(a__ , """positive""" )
| 641 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase ( __snake_case ):
"""simple docstring"""
lowerCAmelCase = (IPNDMScheduler,)
lowerCAmelCase = (('num_inference_steps', 5_0),)
def _UpperCAmelCase ( self , **a__ ) -> List[str]:
A = {"""num_train_timesteps""": 1000}
config.update(**a__ )
return config
def _UpperCAmelCase ( self , a__=0 , **a__ ) -> Tuple:
A = dict(self.forward_default_kwargs )
A = kwargs.pop("""num_inference_steps""" , a__ )
A = self.dummy_sample
A = 0.1 * sample
A = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A = self.get_scheduler_config(**a__ )
A = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# copy over dummy past residuals
A = dummy_past_residuals[:]
if time_step is None:
A = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__ )
A = scheduler_class.from_pretrained(a__ )
new_scheduler.set_timesteps(a__ )
# copy over dummy past residuals
A = dummy_past_residuals[:]
A = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
A = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
A = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( self , a__=0 , **a__ ) -> int:
A = dict(self.forward_default_kwargs )
A = kwargs.pop("""num_inference_steps""" , a__ )
A = self.dummy_sample
A = 0.1 * sample
A = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A = self.get_scheduler_config()
A = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# copy over dummy past residuals (must be after setting timesteps)
A = dummy_past_residuals[:]
if time_step is None:
A = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__ )
A = scheduler_class.from_pretrained(a__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a__ )
# copy over dummy past residual (must be after setting timesteps)
A = dummy_past_residuals[:]
A = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
A = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
A = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _UpperCAmelCase ( self , **a__ ) -> List[Any]:
A = self.scheduler_classes[0]
A = self.get_scheduler_config(**a__ )
A = scheduler_class(**a__ )
A = 10
A = self.dummy_model()
A = self.dummy_sample_deter
scheduler.set_timesteps(a__ )
for i, t in enumerate(scheduler.timesteps ):
A = model(a__ , a__ )
A = scheduler.step(a__ , a__ , a__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
A = model(a__ , a__ )
A = scheduler.step(a__ , a__ , a__ ).prev_sample
return sample
def _UpperCAmelCase ( self ) -> Any:
A = dict(self.forward_default_kwargs )
A = kwargs.pop("""num_inference_steps""" , a__ )
for scheduler_class in self.scheduler_classes:
A = self.get_scheduler_config()
A = scheduler_class(**a__ )
A = self.dummy_sample
A = 0.1 * sample
if num_inference_steps is not None and hasattr(a__ , """set_timesteps""" ):
scheduler.set_timesteps(a__ )
elif num_inference_steps is not None and not hasattr(a__ , """set_timesteps""" ):
A = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A = dummy_past_residuals[:]
A = scheduler.timesteps[5]
A = scheduler.timesteps[6]
A = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
A = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
A = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _UpperCAmelCase ( self ) -> Tuple:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=a__ , time_step=a__ )
def _UpperCAmelCase ( self ) -> Any:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=a__ , time_step=a__ )
def _UpperCAmelCase ( self ) -> int:
A = self.full_loop()
A = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 254_0529 ) < 10
| 641 | 1 |
"""simple docstring"""
import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
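# Illustrative matches for the pattern above:
#   indian_phone_validator("+918827897895")  -> True   (optional +91 prefix)
#   indian_phone_validator("9876543210")     -> True   (10 digits starting with 7/8/9)
#   indian_phone_validator("1234567890")     -> False  (leading digit not in [789])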
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895'''))
| 295 |
"""simple docstring"""
def _lowerCAmelCase ( lowerCamelCase__ : str, lowerCamelCase__ : str ) -> Union[str, Any]:
print("\nThe shortest path matrix using Floyd Warshall algorithm\n" )
for i in range(lowerCamelCase__ ):
for j in range(lowerCamelCase__ ):
if dist[i][j] != float("inf" ):
print(int(dist[i][j] ), end="\t" )
else:
print("INF", end="\t" )
print()
def _lowerCAmelCase ( lowerCamelCase__ : Dict, lowerCamelCase__ : Any ) -> List[str]:
_SCREAMING_SNAKE_CASE : List[Any] = [[float("inf" ) for _ in range(lowerCamelCase__ )] for _ in range(lowerCamelCase__ )]
for i in range(lowerCamelCase__ ):
for j in range(lowerCamelCase__ ):
_SCREAMING_SNAKE_CASE : List[Any] = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(lowerCamelCase__ ):
# looping through rows of graph array
for i in range(lowerCamelCase__ ):
# looping through columns of graph array
for j in range(lowerCamelCase__ ):
if (
dist[i][k] != float("inf" )
and dist[k][j] != float("inf" )
and dist[i][k] + dist[k][j] < dist[i][j]
):
_SCREAMING_SNAKE_CASE : List[Any] = dist[i][k] + dist[k][j]
_print_dist(lowerCamelCase__, lowerCamelCase__ )
return dist, v
if __name__ == "__main__":
lowercase_ : Tuple = int(input('''Enter number of vertices: '''))
lowercase_ : List[Any] = int(input('''Enter number of edges: '''))
lowercase_ : Optional[Any] = [[float('''inf''') for i in range(v)] for j in range(v)]
for i in range(v):
lowercase_ : Tuple = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('''\nEdge ''', i + 1)
lowercase_ : str = int(input('''Enter source:'''))
lowercase_ : Optional[Any] = int(input('''Enter destination:'''))
lowercase_ : Union[str, Any] = float(input('''Enter weight:'''))
lowercase_ : str = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 295 | 1 |
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
def UpperCAmelCase__ ( lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] ) -> Optional[int]:
'''simple docstring'''
lowercase = set()
lowercase = []
def parse_line(lowerCAmelCase__ :Optional[Any] ):
for line in fp:
if isinstance(_a , _a ):
lowercase = line.decode("""UTF-8""" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(""" """ ):
# process a single warning and move it to `selected_warnings`.
if len(_a ) > 0:
lowercase = '''\n'''.join(_a )
# Only keep the warnings specified in `targets`
if any(f': {x}: ' in warning for x in targets ):
selected_warnings.add(_a )
buffer.clear()
continue
else:
lowercase = line.strip()
buffer.append(_a )
if from_gh:
for filename in os.listdir(_a ):
lowercase = os.path.join(_a , _a )
if not os.path.isdir(_a ):
# read the file
if filename != "warnings.txt":
continue
with open(_a ) as fp:
parse_line(_a )
else:
try:
with zipfile.ZipFile(_a ) as z:
for filename in z.namelist():
if not os.path.isdir(_a ):
# read the file
if filename != "warnings.txt":
continue
with z.open(_a ) as fp:
parse_line(_a )
except Exception:
logger.warning(
f'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )
return selected_warnings
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :int ) -> List[str]:
'''simple docstring'''
lowercase = set()
lowercase = [os.path.join(_a , _a ) for p in os.listdir(_a ) if (p.endswith(""".zip""" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(_a , _a ) )
return selected_warnings
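# Illustrative invocation (script name, run id and token are placeholders):
#   python extract_warnings.py --workflow_run_id 1234567890 \
#       --output_dir warnings_out --token <actions-read-token> \
#       --targets DeprecationWarning,UserWarning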
if __name__ == "__main__":
def UpperCAmelCase__ ( lowerCAmelCase__ :Any ) -> Optional[Any]:
'''simple docstring'''
return values.split(""",""" )
__lowerCAmelCase : str =argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
__lowerCAmelCase : List[str] =parser.parse_args()
__lowerCAmelCase : List[Any] =args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
__lowerCAmelCase : Dict =get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 8_0)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
__lowerCAmelCase : Tuple =extract_warnings(args.output_dir, args.targets)
__lowerCAmelCase : List[Any] =sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 359 |
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class _UpperCAmelCase ( lowerCAmelCase__):
def _snake_case ( self : int , lowercase_ : Optional[Any]=None , lowercase_ : List[str]=None , lowercase_ : Optional[Any]=None , **lowercase_ : Any ):
if tokenize_kwargs is None:
snake_case_ : str = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' )
snake_case_ : int = truncation
snake_case_ : Union[str, Any] = tokenize_kwargs
snake_case_ : int = {}
if return_tensors is not None:
snake_case_ : str = return_tensors
return preprocess_params, {}, postprocess_params
def _snake_case ( self : List[Any] , lowercase_ : Optional[int] , **lowercase_ : int ):
snake_case_ : Union[str, Any] = self.framework
snake_case_ : List[Any] = self.tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
return model_inputs
def _snake_case ( self : Union[str, Any] , lowercase_ : Tuple ):
snake_case_ : Union[str, Any] = self.model(**lowercase_ )
return model_outputs
def _snake_case ( self : str , lowercase_ : str , lowercase_ : List[str]=False ):
# [0] is the first available tensor, logits or last_hidden_state.
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self : List[str] , *lowercase_ : int , **lowercase_ : Dict ):
return super().__call__(*lowercase_ , **lowercase_ )
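# Usage sketch (model name illustrative):
#   pipeline("feature-extraction", model="bert-base-cased")("hello world")
# returns a nested Python list of shape [1, sequence_length, hidden_size], because
# postprocess() above calls .tolist() on the model's first output tensor.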
| 123 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case = logging.get_logger(__name__)
__snake_case = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
__snake_case = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
__snake_case = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class _lowerCAmelCase ( snake_case_ ):
__UpperCAmelCase : Dict = VOCAB_FILES_NAMES
__UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Optional[Any] = ['''input_ids''', '''attention_mask''']
__UpperCAmelCase : Union[str, Any] = GPTaTokenizer
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__=False , **UpperCamelCase__ , ) -> List[str]:
'''simple docstring'''
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
snake_case : List[str] = kwargs.pop("add_bos_token" , UpperCamelCase__ )
snake_case : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCamelCase__ ) != add_prefix_space:
snake_case : int = getattr(UpperCamelCase__ , pre_tok_state.pop("type" ) )
snake_case : Optional[Any] = add_prefix_space
snake_case : Any = pre_tok_class(**UpperCamelCase__ )
snake_case : Any = add_prefix_space
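        # The block above re-instantiates the backend (ByteLevel) pre-tokenizer whenever
        # the requested `add_prefix_space` differs from the value serialized in
        # tokenizer.json, so the keyword argument actually takes effect on the fast tokenizer.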
def lowerCamelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> BatchEncoding:
'''simple docstring'''
snake_case : Any = kwargs.get("is_split_into_words" , UpperCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase__ , **UpperCamelCase__ )
def lowerCamelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> BatchEncoding:
'''simple docstring'''
snake_case : int = kwargs.get("is_split_into_words" , UpperCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase__ , **UpperCamelCase__ )
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
snake_case : Optional[Any] = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
def lowerCamelCase ( self , UpperCamelCase__ ) -> List[int]:
'''simple docstring'''
snake_case : Tuple = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
if len(UpperCamelCase__ ) > self.model_max_length:
snake_case : List[Any] = input_ids[-self.model_max_length :]
return input_ids
| 117 |
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( snake_case_ , unittest.TestCase ):
__UpperCAmelCase : List[Any] = LayoutLMTokenizer
__UpperCAmelCase : List[Any] = LayoutLMTokenizerFast
__UpperCAmelCase : List[Any] = True
__UpperCAmelCase : Dict = True
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
super().setUp()
snake_case : Dict = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def lowerCamelCase ( self , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase ( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
snake_case : str = "UNwant\u00E9d,running"
snake_case : Optional[int] = "unwanted, running"
return input_text, output_text
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
snake_case : List[str] = self.tokenizer_class(self.vocab_file )
snake_case : Optional[Any] = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(UpperCamelCase__ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] )
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
pass
| 117 | 1 |
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0) .. C(upper_limit) as a list."""
    if upper_limit < 0:
        raise ValueError("""Limit for the Catalan sequence must be ≥ 0""")
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
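# Sanity check of the sequence produced above:
#   >>> catalan_numbers(5)
#   [1, 1, 2, 5, 14, 42]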
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
            N = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
| 36 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save an untrained model (random weights) that matches a pretrained config."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
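# Example invocation via fire (script name, model name and output path are illustrative):
#   python save_randomly_initialized_model.py t5-small ./t5-small-random
# writes an untrained checkpoint with t5-small's architecture plus its tokenizer files.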
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 36 | 1 |
from __future__ import annotations
from typing import TypedDict
class _lowerCAmelCase ( __snake_case ):
_UpperCAmelCase = 4_2
_UpperCAmelCase = 4_2
def _lowerCamelCase ( a_ : str):
if not isinstance(_lowerCamelCase , _lowerCamelCase):
raise TypeError('''The parameter s type must be str.''')
return [s[i:] + s[:i] for i in range(len(_lowerCamelCase))]
def _lowerCamelCase ( a_ : str):
if not isinstance(_lowerCamelCase , _lowerCamelCase):
raise TypeError('''The parameter s type must be str.''')
if not s:
raise ValueError('''The parameter s must not be empty.''')
lowerCamelCase :int = all_rotations(_lowerCamelCase)
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
lowerCamelCase :BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations]),
"idx_original_string": rotations.index(_lowerCamelCase),
}
return response
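# Worked example (rotation-sort BWT without a sentinel character), matching the
# bwt_transform/reverse_bwt usage in the __main__ block below:
#   bwt_transform("banana") -> {"bwt_string": "nnbaaa", "idx_original_string": 3}
#   reverse_bwt("nnbaaa", 3) -> "banana"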
def _lowerCamelCase ( a_ : str , a_ : int):
if not isinstance(_lowerCamelCase , _lowerCamelCase):
raise TypeError('''The parameter bwt_string type must be str.''')
if not bwt_string:
raise ValueError('''The parameter bwt_string must not be empty.''')
try:
lowerCamelCase :int = int(_lowerCamelCase)
except ValueError:
raise TypeError(
'''The parameter idx_original_string type must be int or passive'''
''' of cast to int.''')
if idx_original_string < 0:
raise ValueError('''The parameter idx_original_string must not be lower than 0.''')
if idx_original_string >= len(_lowerCamelCase):
raise ValueError(
'''The parameter idx_original_string must be lower than''' ''' len(bwt_string).''')
lowerCamelCase :Tuple = [""""""] * len(_lowerCamelCase)
for _ in range(len(_lowerCamelCase)):
for i in range(len(_lowerCamelCase)):
lowerCamelCase :Optional[Any] = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
A__ = """Provide a string that I will generate its BWT transform: """
A__ = input(entry_msg).strip()
A__ = bwt_transform(s)
print(
F'Burrows Wheeler transform for string \'{s}\' results '
F'in \'{result["bwt_string"]}\''
)
A__ = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
F'we get original string \'{original_string}\''
)
| 718 | import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
@staticmethod
def snake_case ( *__snake_case : str , **__snake_case : str ):
pass
@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Optional[int] = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__snake_case ) , [
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}],
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}],
] , )
lowerCamelCase :Tuple = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@require_tf
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] , )
lowerCamelCase :int = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@slow
@require_torch
def snake_case ( self : Any ):
lowerCamelCase :str = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
| 49 | 0 |